\documentclass[twoside]{article} \usepackage{amsfonts} % used for R in Real numbers \pagestyle{myheadings} \markboth{ Exponential dichotomies for linear systems } { Ra\'ul Naulin } \begin{document} \setcounter{page}{225} \title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent USA-Chile Workshop on Nonlinear Analysis, \newline Electron. J. Diff. Eqns., Conf. 06, 2001, pp. 225--241\newline http://ejde.math.swt.edu or http://ejde.math.unt.edu \newline ftp ejde.math.swt.edu or ejde.math.unt.edu (login: ftp)} \vspace{\bigskipamount} \\ % Exponential dichotomies for linear systems with impulsive effects % \thanks{ {\em Mathematics Subject Classifications:} 34A05, 34E05. \hfil\break\indent {\em Key words:} Impulsive linear systems, singularly perturbed impulsive systems, \hfil\break\indent dichotomies, splitting of impulsive systems. \hfil\break\indent \copyright 2001 Southwest Texas State University. \hfil\break\indent Published January 8, 2001. \hfil\break\indent Supported by Proyecto UDO-CI-5-1003-0936/00. } } \date{} \author{ Ra\'ul Naulin } \maketitle \begin{abstract} In this paper we give conditions for the existence of a dichotomy for the impulsive equation $$\displaylines{ \mu(t,\varepsilon) x'= A(t)x, \; t \neq t_k,\cr x(t_k^+ )= C_k x(t_k^-)\,, }$$ where $\mu(t,\varepsilon)$ is a positive function such that $\lim\mu(t,\varepsilon)=0$ in some sense. The results are expressed in terms of the properties of the eigenvalues of matrices $A(t)$, the properties of the eigenvalues of matrices $\{C_k\}$ and the location of the impulsive times $\{t_k\}$ in $[0, \infty)$. 
\end{abstract} \newtheorem{lemma}{Lemma} \newtheorem{theo}{Theorem} \newtheorem{defi}{Definition} \section{Introduction} In this paper we study the dichotomic properties of the impulsive system \begin{eqnarray}\label{1} & \mu(t,\varepsilon) x'(t) = A(t)x(t), \quad t \neq t_k ,\; J=[0,\infty),& \\ &x(t_k^+ ) = C_k x(t_k^- ),\quad k \in \mathbb{N}=\{1,2,3,\ldots\}\,,& \nonumber \end{eqnarray} where $x(t_k^{\pm})=\lim_{t \to t_k^{\pm}}x(t)$. The function $A(\cdot)$ and the sequence $\{C_k\}$ have properties to be specified later. The function $\mu(t,\varepsilon)$ depends on a parameter $\varepsilon$, in general, belonging to a metric space $E$. We will assume that $\mu(t,\varepsilon)$, for each fixed $\varepsilon$, is continuous. The cases we are interested in most are $\mu(t,\varepsilon)=\varepsilon >0$, $\mu(t,\varepsilon)=\mu(t)$, such that $\lim_{t \rightarrow \infty} \mu(t)=0$ and $\mu(t,\varepsilon)=1$. In what follows, for technical purposes we shall suppose that \begin{equation}\label{2} 0< \mu(t,\varepsilon) \leq 1,\; \forall (t,\varepsilon) \in J \times E. \end{equation} For ordinary differential equations, the singular perturbed case ($\mu(t,\varepsilon)=\varepsilon >0$) has been intensively studied in \cite{chn,sm}; the regular case ($\mu(t,\varepsilon)=1$) has been considered in \cite{co}; the general setting of the problem (\ref{1}), when $\mu(t,\varepsilon)=\mu(t)$, $\lim_{t \to \infty}\mu(t)=0$ was studied in \cite{np3}. The aim of this paper is to give a set of algebraic conditions of existence of a $(\mu_1, \mu_2)$-dichotomy \cite{bs}, meaning by this conditions involving the properties of the functions of eigenvalues of matrices $A(t)$, the eigenvalues of matrices belonging to the sequence $\{C_k\}$, and the location of the impulsive times $\{t_k\}$. \section{Notations and basic hypotheses} In this paper $V$ stands for the field of complex numbers. We will assume that a fixed norm $\|\cdot\|$ on the space $V^n$ is defined. 
For a matrix $A\in V^{n \times n}$, $\|A\|$ will denote the corresponding functional matrix norm. If $m$ and $n$ are integral numbers, then the set $ \{m,m+1,m+2,\ldots, n \} $ will be denoted by $\overline{m,n}$. The symbol $\{t_k\}$ identifies a strictly increasing sequence of positive numbers, satisfying $\lim_{k \to \infty} t_k=\infty $. The solutions of all considered impulsive systems are uniformly continuous on each interval $J_k=(t_{k-1}, t_k]$. Further notations: {\flushleft {\bf -} For a bounded function $f$, we denote $\|f\|_\infty = \mbox{sup}\{\|f(t)\|: t\in J\} $, \vspace{1mm} {\bf -} For an absolutely integrable function $f$, we denote $\|f\|_1=\int_0^\infty\|f(t)\|dt$, \vspace{1mm} {\bf -} For a bounded sequence $\{C_k\}$, we denote $\|\{C_k\}\|_{\infty} =\mbox{sup}\{\|C_k\|:\;k \in \mathbb{N}\}$, \vspace{1mm} {\bf -} For a summable sequence $\{C_k\}$, we denote $\|\{C_k\}\|_1=\sum_{k=1}^\infty \|C_k\|$, \vspace{1mm} {\bf -}$C ( \{t_k\})=\{ f:J \to V^n: f \mbox{ is uniformly continuous on all intervals }J_k\}$, \vspace{1mm} {\bf -}$BC ( \{t_k\})=\{ f\in C ( \{t_k\}): f \mbox{ is bounded}\}$. \vspace{1mm} {\bf -} The function $i[s,t)$ will denote the number of impulsive times contained in the interval $[s,t)$ if $t>s$; if $s \leq t_k < t_{k+1} < \cdots < t_h < t, $ we define $$\displaylines{ \sum_{[s,t)}C_i=C_k+C_{k+1}+\cdots+C_h, \quad \sum_{[t,t)}C_i = 0, \cr \prod_{[s,t)} C_i=C_h C_{h-1} \cdots C_k, \quad\prod_{[t,t)}C_i = I\,. }$$ } We will denote by $X(t)=X(t,\varepsilon)$ the fundamental matrix of the impulsive system (\ref{1}). By this we mean a function $X: J \to V^{n \times n}$ uniformly continuous, of class $C^{1}$ on each interval $J_k$, such that $X(0^+)=I$ and $X$ satisfies (\ref{1}). The definition and basic properties of function $X(t,\varepsilon)$, for each fixed $\varepsilon$, are described in \cite{bs1,lbs}. Below, we list the basic hypotheses {\bf H1-H5} we will use. 
%======================H1===================== {\flushleft \bf H1: }{\it The function $A $ is bounded and piecewise uniformly continuous on $J$ with respect to $\{t_k\}$. This last means: For any $\rho >0$, there exists a number $\delta(\rho) >0$, such that $\|A(t)-A(s)\|< \rho$, if $|t-s|<\delta$, $t,s \in J_k$ for all $k\in N$. } %===========================H2======================= {\flushleft \bf H2: }{\it There exist numbers $p\geq 0$ and $q> 1$, such that $$ |i[s,t) -p(t-s)|\leq q, \; s \leq t. $$ } %======================H3======================== {\flushleft \bf H3: } {\it $\{C_k\}_{k=1}^\infty $ is a bounded sequence of invertible matrices. } %==============================H4===================== {\flushleft \bf H4: }{\it There exists a positive number $\gamma $, such that for any $k$, all eigenvalues $\mu_k$ of the matrix $C_k$ satisfy the condition $ \gamma | \mu_k |\geq 1. $ } \begin{defi} We shall say that $ \{\lambda_1, \lambda_2, \ldots,\lambda_n \} $, the eigenvalues of matrix $A$, are ordered by real parts (respectively, ordered by norms) iff $$ \mbox{Re} \lambda_1 \leq \mbox{Re}\lambda_2\leq \ldots \leq \mbox{Re}\lambda_n,\; (\mbox{respectively } |\lambda_1|\leq |\lambda_2|\leq \ldots\leq|\lambda_n|). $$ \end{defi} In the sequel, we will assume that $ \{\lambda_1(t), \lambda_2(t), \ldots,\lambda_n(t) \} $ the eigenvalues of matrix $A(t)$ are ordered by real parts, and $ \{\mu_1(k), \mu_2(k), \ldots,\mu_n(k) \} $ the eigenvalues of matrix $C_k$ are ordered by norms. We will consider the following piecewise constant function \begin{equation}\label{4} u_m: J \to \mathbb{R}, \; u_m(t)=\frac{\ln |\mu_m(k)|}{t_k-t_{k-1}}, \; \mbox{ if } t \in J_k.\end{equation} In order to alleviate the writing, let us denote for $m \in \overline{1,n-1}$ $$ \alpha_m(t,\varepsilon)=\frac{ \mbox{Re}(\lambda_m(t)-\lambda_{m+1}(t))}{\mu(t,\varepsilon)}+ u_m(t)-u_{m+1}(t). $$ The following hypothesis is a slight modification of a condition of splitting used in \cite{lm}. 
%000000000000000000 H-5 0000000000000000000000000 {\flushleft \bf H5: }{\it There exists a positive constant $M$ such that the function $$ \begin{array}{rcl} U_m(t, \varepsilon) & = & \displaystyle \int_{0}^t\frac{1}{\mu(s,\varepsilon)} \exp \left\{ \int_s^t\alpha_m(\tau,\varepsilon) d\tau \right\}ds, \\ & & \\ & + & \displaystyle \int_t^{+\infty} \frac{1}{\mu(s,\varepsilon)}\exp\left\{\int_t^s \alpha_m(\tau,\varepsilon)\, d\tau \right\}ds \end{array} $$ satisfies $$ \|U_m(t, \varepsilon)\| \leq M,\; \forall (t,\varepsilon) \in [0,\infty) \times E. $$ } \section{The quasidiagonalization method} We will assume that, for some positive number $r$, the families of matrices $\{A(t): \; t \in J\}$ and $\{C_k: \: k \in \mathbb{N} \}$ are contained in the set $$ {\cal M}(r)= \{F \in V^{n \times n}: \|F\| \leq r \}. $$ For each matrix $ F \in {\cal M}(r)$ and $\sigma >0$, by Theorem 1.6 in \cite{be}, we may choose a nonsingular matrix $S$ such that \begin{equation}\label{5} S^{-1}FS=\Lambda(F)+R(F,\sigma), \; \|R(F,\sigma)\|\leq \sigma/2, \end{equation} where $\Lambda(F)$ denotes the diagonal matrix of eigenvalues of matrix $F$, ordered by real parts. Let us consider the ball $B[F, \rho]=\{G\in V^{n \times n}: \|F-G\|\leq \rho\}$. For any $G \in B[F, \rho ]$ we have $$ S^{-1}GS=\mathop{\rm Re}\Lambda(F)+i\mathop{\rm Im}\Lambda(F)+ S^{-1}(G-F)S+ R(F,\sigma), \; i^2=-1, $$ where $$ \Lambda(F)=\mathop{\rm diag}\{\lambda_1, \lambda_2, \dots, \lambda_n\}, \; \mathop{\rm Re}\Lambda(F)=\mathop{\rm diag}\{\mathop{\rm Re}\lambda_1, \mathop{\rm Re}\lambda_2, \dots,\mathop{\rm Re} \lambda_n\}. $$ From this decomposition we obtain $$ S^{-1}GS=\mathop{\rm Re}\Lambda(G)+i\mathop{\rm Im}\Lambda(F)+ T(F,\rho)+R(F,\sigma), $$ where $$ T(F,\rho)= \left( \Lambda(F)-\Lambda(G) \right)+S^{-1}(G-F)S. $$ From Hurwitz's theorem (see \cite{con}, page 148), the function ${\cal L}: V^{n \times n} \to V^{n \times n}$ defined by $ {\cal L}(F)=\mbox{ Re }\Lambda (F) $ is continuous. 
This assertion implies, for a fixed number $\sigma>0$ and a matrix $F \in {\cal M}(r)$ the existence of a nonsingular matrix $S$ and a $\rho >0$, such that if $G \in B[F, \rho]$, then $$ S^{-1}GS=\mathop{\rm Re}\Lambda(G)+i\mathop{\rm Im}\Lambda(F)+ \Gamma(F,\sigma),\; \Gamma(F,\sigma):=T(F,\rho)+R(F,\sigma), $$ and $\|\Gamma(F,\sigma)\|\leq \sigma$. Since ${\cal M}(r)$ is compact, then given a $\sigma >0$, there exist a covering ${\cal F}=\{B[F_j, \rho_j]\}_{j=1}^{m}$ of ${\cal M}(r)$, and nonsingular matrices $ {\cal S} =\left\{S_1, S_2, \dots , S_m \right\} $ having the following property: For a fixed $G \in {\cal M}(r)$ there exists an index $j\in \{1,2,\ldots,m\}$, such that $G \in B[ F_j, \rho_j]$ and \begin{equation}\label{6} S^{-1}_j G S_j=\mathop{\rm Re}\Lambda( G )+i\mathop{\rm Im}\Lambda(F_j)+ \Gamma_j(\sigma),\; \|\Gamma_j(\sigma)\| \leq \sigma. \end{equation} Let $\rho >0$ be a Lebesgue number of the covering ${\cal F}$. According to {\bf H1}, there exists a $\delta >0$, non depending on $k$, such that for $t,s \in J_k$, $|t-s|\leq \delta$ we have $\|A(t)-A(s)\| < \rho$. Let us define $$ n(k,\delta)=\mbox{inf}\{j \in \mathbb{N}: \frac{t_k-t_{k-1}}{j}\leq \delta \}, $$ and the partition of the interval $J_k$: $$ {\cal P}_k= \{t^{k}_0, t^{k}_1, \ldots , t^k_{n(k)} \},\;t^{k}_0= t_{k-1},\;t^k_{n(k)}=t_{k}, $$ defined by $$ |t^k_{i-1}- t^k_{i}|= \delta_k ,\; i\in \overline{1,n(k)}, \; \delta_k:= \frac{t_k-t_{k-1}}{n(k,\delta)} . $$ We emphasize that $n(k,\delta)=1 \mbox{ iff } t_k-t_{k-1}\leq \delta$. This and {\bf H2} yield \begin{equation}\label{7} n(k,\delta)\leq L(p,\delta) (t_k-t_{k-1}),\; L(p,\delta):=\max\{\frac{p}{q-1}, \frac{2}{\delta} \}. 
\end{equation} According to the decomposition (\ref{6}), we may assign to the interval $(t^k_{i-1}, t^k_i]$ a nonsingular matrix $S_{k,i}\in { \cal S }$ and $F_{k,i}\in \{F_j\}_{j=1}^m$, such that \begin{equation}\label{8} S^{-1}_{k,i} A(t)S_{k,i}= \mathop{\rm Re}\Lambda(t)+i\mathop{\rm Im}\Lambda(F_{k,i})+ \Gamma_{k,i}(\sigma), \; t \in (t_{i-1}^k,t_i^k], \end{equation} where we have abbreviated $\Lambda(t)=\Lambda (A(t))$ and \begin{equation}\label{9} \|\Gamma_{k,i}(\sigma) \|\leq \sigma.\end{equation} Regarding the sequence $\{C_k\}_{k=1}^\infty$, we will accomplish a similar procedure. Let us consider a matrix $D \in {\cal M}(r )$ and $\sigma >0$. For some nonsingular matrix $T$ we will have, instead of (\ref{5}), the decomposition \begin{equation}\label{10} T^{-1}DT= N(D)+R(D,\sigma),\; \|R(D, \sigma)\| < \sigma, \end{equation} where the matrix $N(D)$ is defined by means of the eigenvalues of $D$: $$ N(D)=\mathop{\rm diag}\{\mu_1, \mu_2, \dots, \mu_n\}, \;|\mu_1| \leq |\mu_2| \leq \dots \leq | \mu_n|. $$ We may write (\ref{10}) in the form $$ T^{-1}DT= |N(D)|e^{\textstyle i\,\mbox{Arg}(D)}+R(D,\sigma), $$ where $$ \mbox{Arg}(D)=\mathop{\rm diag}\{ \mbox{arg}(\mu_1), \mbox{arg}(\mu_2), \dots, \mbox{arg}(\mu_n)\} $$ and $$ |N(D)|= \mathop{\rm diag}\{|\mu_1|, |\mu_2|, \dots, |\mu_n|\}. $$ For a matrix $C \in B[D, \rho]$, $\rho > 0$, we write $$ \begin{array}{rcl} T^{-1}CT & = & |N(C)|e^{\textstyle i\,Arg(D)}+ (|N(C)|-|N(D)|)e^{\textstyle i\,Arg(D)} \\ & & \\ & + & T^{-1}(C-D)T+R(D, \sigma),\;\|R(D, \sigma)\| \leq \sigma. \end{array} $$ Hurwitz's theorem implies that the function ${\cal N}: V^{n \times n} \to V^{n \times n}$ defined by $ {\cal N}(C)=|N(C)| $ is continuous. 
Since ${\cal M}(r )$ is compact, then for a given $\sigma >0$, there exists a covering ${\cal D}=\{B[D_i, \rho_i] \}_{i=1}^{\tilde m}$ of ${\cal M}(r )$, and a set of nonsingular matrices $ {\cal T}=\left\{ T_1,T_2,\ldots , T_{\tilde m} \right\}, $ such that for each $C_k$ there exists a $T_k\in {\cal T}$ and $D_k \in \{D_i\}_{i=1}^{\tilde m}$ such that \begin{equation}\label{11} T_k^{-1}C_kT_k= |N(C_k)|e^{\textstyle i\,Arg(D_k)}+ \tilde \Gamma_k( \sigma),\;\|\tilde \Gamma_k( \sigma)\| \leq \sigma. \end{equation} %00000000000000000000000000000000000000000000000000000000000000000000 %00000000000000000000 A CHANGE OF VARIABLES 0000000000000000000000000 %00000000000000000000000000000000000000000000000000000000000000000000 \section{A change of variables} Let $g: [0,1 ] \to [0,1]$ be a strictly increasing function, $g \in C^\infty$, such that $g(0)=g'(0)=g'(1)=0$, $g(1)=1$. For an ordered pair $(Q,R)$ of invertible matrices we define \[ \theta :[a,b] \to V^{n\times n},\; \theta(t) = Q\mbox{ exp } \left\{ g \left(\frac{t-a}{b-a}\right) Ln(Q^{-1} R) \right\}. \] The path $\theta $ is of class $C^\infty$. Moreover $ \theta(t) $ is a nonsingular matrix for each $ t$, and $\theta (a) = Q$, $\theta (b) = R$, $ \theta '(a) = 0,$ $ \theta '(b) = 0. $ In the sequel, we shall say that the path $\theta$ splices the ordered pair of matrices $(Q,R)$ on the interval $[a,b]$. In order to perform a change of variable of system (\ref{1}), we splice matrices $(S_{k,i},S_{k,i+1})$, $i\in \overline{1,n(k)-1}$ on an interval $[t_{i}^{k}-\nu_k(\varepsilon)\delta_{k,i}/2, t_i^k+\nu_k(\varepsilon)\delta_{k,i}/2]$, where $ \nu_k(\varepsilon)=\mbox{inf}\{\ \mu(t, \varepsilon): t \in J_k \}, $ and $\delta_{k,i} $ are small numbers satisfying $\nu_k(\varepsilon)\delta_{k,i}< \delta_k$ and another condition we will specify in the forthcoming definition of number $\nu$ (see (\ref{14}). 
Let us define the path $$ \theta_{k,i}: [t_{i}^{k}-\nu_k(\varepsilon)\delta_{k,i}/2,\; t_i^k + \nu_k(\varepsilon)\delta_{k,i}/2] \to V^{n \times n}$$ splicing the matrices $(S_{k,i},S_{k,i+1})$ in the following way $$ \theta_{k,i}(t)=S_{k,i} \mbox{ exp } \left\{ g \left(\frac{t-t_{i}^{k}+\nu_k(\varepsilon) \delta_{k,i}/2}{\nu_k(\varepsilon )\delta_{k,i}}\right) Ln(S_{k,i}^{-1} S_{k,i+1}) \right\}. $$ For the constant $$ K_1(\sigma)=\max \left\{ \left( \|S_{k}\|+ \|Ln(S_{k}^{-1} S_{i})\| \right) \mbox{ exp } \left\{ \|Ln(S_{k}^{-1} S_{i})\| \right\} : 1 \leq k,i \leq m \right\} $$ we have the estimates \begin{equation}\label{12} \| \theta_{k,i}(t)\|_\infty \leq K_1(\sigma ),\; \| \theta_{k,i}'(t)\|_\infty \leq \frac{K_1(\sigma)}{\nu_k(\varepsilon)\delta_{k,i}} .\end{equation} The matrix $T_{k+1}$ assigned to the impulsive time $t_0^{k+1}=t_{k}=t^{k}_{n(k)}$ and the matrix $S_{k+1,1}$ are spliced on the interval $[t_0^{k+1},t_0^{k+1}+\nu_{k+1}(\varepsilon)\delta_{k+1,0}/2]$ by a path we denote by $\theta_{k+1,0}$. The matrices $(S_{k,n(k)}, T_{k+1})$ are spliced on the interval $[t^k_{n(k)}- \nu_k(\varepsilon)\delta_{k,n(k)}/2,\;t^k_{n(k)}]$ by a path we denote by $\theta_{k,n(k)}$. We emphasize that $\theta_{k+1,0}(t_k)=T_{k+1}=\theta_{k,n(k)}(t_k)$. The time $t=0$, which is not considered as an impulsive time, deserves a special mention. We will attach to the time $t=0$ the matrix $S_{1,1}$. Estimates similar to (\ref{12}) are valid for these splicing paths, with a modified constant for which we maintain the notation $K_1(\sigma)$. 
Let us define the intervals %\et{13} \begin{equation} \label{13} \begin{array}{c} I_{k} = [t_0^{k+1}-\nu_k(\varepsilon)\delta_{k,0}/2,t_0^{k+1}+ \nu_{k+1}(\varepsilon) \delta_{k+1,0}/2], k=1,2,\ldots, \\[3pt] I_{k,i} = (t_{i}^{k}-\nu_k(\varepsilon)\delta_{k,i}/2,t_i^k + \nu_k(\varepsilon) \delta_{k,i}/2), \; i\in \overline{1,n(k)-1}, \end{array} \end{equation} and the number \begin{equation}\label{14} \nu=\displaystyle \sum_{k=1}^{\infty}\sum_{i=1}^{n(k)}\delta_{k,i}.\end{equation} The choice of the numbers $\delta_{k,i}$ is at our disposal. Therefore, $\nu$ can be made as small as necessary. Let us consider the $C^\infty$ function $$ S(t)= \left\{ \begin{array}{ll} \theta_{k+1,0}(t), \; t \in [t^{k+1}_{0}, t^{k+1}_{0}+\nu_{k+1}(\varepsilon)\delta_{k,0}/2],& k= 0,1,\ldots \\[3pt] S_{k,i}, \; t \in [t^k_{i}+\nu_k(\varepsilon)\delta_{k,i}/2, t^k_{i+1}- \nu_k(\varepsilon)\delta_{k,i+1}/2], & i\in \overline{0, n(k)-1}, \\[3pt] \theta_{k,i}(t), \; t \in [t^k_{i}-\nu_k(\varepsilon) \delta_{k,i}/2,t^k_{i}+\nu_k(\varepsilon) \delta_{k,i}/2], & i\in \overline{1, n(k)-1}, \\[3pt] \theta_{k,n(k)}(t), \; t \in [t^{k}_{n(k)-1}-\nu_k(\varepsilon)\delta_{k,n(k)}/2,t_{n(k)}^k], & k= 1,2, \ldots \end{array} \right. $$ From this definition $S'(t)=0$ except on the intervals $I_k$ and $I_{k,i}$. Since $S(t_k)=T_k$, the change of variable $x=S(t)y$ reduces System (\ref{1}) to the form \begin{eqnarray}\label{15} & \mu(t,\varepsilon) y'(t) = \left( S^{-1}(t)A(t)S(t)- \mu(t,\varepsilon)S^{-1}(t)S'(t) \right) y(t), t \neq t_k ,&\\ & y(t_k^+ ) = \left(|N(C_k)|e^{\textstyle i\, \mathop{\rm Arg}(D_k)}+ \tilde \Gamma_k(\sigma) \right) y(t_k ), k \in \mathbb{N}, &\nonumber \end{eqnarray} where $\|\tilde \Gamma_k(\sigma)\| \leq \sigma$. Thus, this change of variable yields a notable simplification of the discrete component of (\ref{1}). 
Let us define the left continuous function $L:J\to V^{n \times n}$ by $$ L(0)=S_{1,1},\quad L(t) = S_{k,i},\quad t \in (t^k_{i-1}, t^k_{i}], \; i\in \overline{1,n(k)}. $$ From $S^{-1}(t)A(t)S(t)=L^{-1}(t)A(t)L(t)+F(t,\sigma)$, where \begin{equation}\label{16} F(t,\sigma)=S^{-1}(t)A(t)S(t)-L^{-1}(t)A(t)L(t), \end{equation} we may write System (\ref{15}) in the form \begin{eqnarray*} &\mu(t,\varepsilon) y'(t) = \left( L^{-1}(t)A(t)L(t) + F(t,\sigma)- \mu(t,\varepsilon) S^{-1}(t)S'(t)\right)y(t), \; t \neq t_k ,&\\ &y(t_k^+ ) = \left(N_ke^{\textstyle i\,\mbox{Arg}(D_k)}+ \tilde \Gamma_k(\sigma) \right) y(t_k ),\quad k \in \mathbb{N}\,.& \end{eqnarray*} From (\ref{8}) and the definition of the piecewise constant functions \begin{equation}\label{17} G(t)=\mathop{\rm Im}\Lambda(F_{k,i}),\;t \in (t^k_{i-1}, t^k_{i}],\quad \Gamma(t,\sigma)=\Gamma_{k,i}(\sigma),\; t \in (t^k_{i-1}, t^k_{i}], \end{equation} we can write the last system in the form \begin{eqnarray}\label{18} &\mu(t,\varepsilon) y'(t) = \Big(\mathop{\rm Re}\Lambda(t)+i G(t) + \Gamma (t,\sigma)+F(t,\sigma) &\nonumber\\ &\hspace{2cm} - \mu(t,\varepsilon) S^{-1}(t)S'(t)\Big)y(t), \; t \neq t_k , & \\ &y(t_k^+ ) = \left(N_ke^{\textstyle i\,\mbox{Arg}(D_k)}+ \tilde \Gamma_k( \sigma) \right) y(t_k ),\quad k \in \mathbb{N}\,.& \nonumber \end{eqnarray} \begin{lemma} \begin{equation}\label{19} \|\Gamma(t,\sigma )\|_\infty \leq \sigma, \quad \|\{ \tilde \Gamma_k(\sigma)\}\|_\infty \leq \sigma, \end{equation} \begin{equation}\label{20} \|\mu(.,\varepsilon)^{-1}F(.,\sigma)\|_1 \leq K_2(\sigma)\nu , \end{equation} \begin{equation}\label{21} \int_s^t \|S^{-1}(\tau)S'(\tau) \|d\tau \leq K_3(\sigma)L(p,\delta) (t-s), \; t\geq s. \end{equation} \end{lemma} {\flushleft \bf Proof.} The first estimate of (\ref{19}) follows from the definition of function $\Gamma(t, \sigma)$ given by (\ref{17}) and (\ref{9}), and the second follows from (\ref{11}). 
From definition (\ref{16}), there exists a constant $K_2(\sigma)$ depending only on $\sigma$ such that $$ \| F(\cdot,\sigma)\|_\infty \leq K_2(\sigma). $$ Moreover, from (\ref{16}) we observe that $F(\cdot, \sigma)$ vanishes outside of the intervals $ I_{k,i}$ and $ I_{k}$. Therefore, from the definitions (\ref{13})-(\ref{14}) we obtain $$ \displaystyle \int_0^\infty |\frac{F(t,\sigma)}{\mu (t,\varepsilon)}|dt = \displaystyle K_2(\sigma)\Big( \sum_{i,k} \int_{ I_{k,i}}\frac{1}{\mu_k (\varepsilon)}dt+ \displaystyle \sum_{k} \int_{ I_{k}}\frac{1}{\mu_k (\varepsilon)}dt\Big) \leq K_2(\sigma) \nu\,. $$ In order to obtain (\ref{21}) we observe that $S^{-1}(t)S'(t)$ vanishes outside of the intervals $ I_{k,i}$ and $ I_{k}$. Moreover, there exists a constant $K_3(\sigma)$ depending only on $\sigma$, such that on each interval $[t^k_{i-1},t^k_i]$ we have $$ \displaystyle \int_{t_{i-1}^k}^{t_i^k} \| S^{-1}(\tau)S'(\tau) \| d\tau \leq K_3(\sigma). $$ From this estimate and (\ref{7}), it follows $$ \int_s^t \| S^{-1}(\tau)S'(\tau) \|d\tau \leq K_3(\sigma) L(p,\delta) (t-s). $$ In what follows we unify the notations of the constants $K_i(\sigma), i=1,2,3$ in a simple constant $K(\sigma)$. \section{Splitting and dichotomies} We are interested in the proof of existence of a dichotomy for the System (\ref{18}). In this task we will follow the way indicated by Coppel in \cite{co}: First we split System (\ref{18}) in two systems of lower dimensions and after this, the Gronwall inequality for piecewise continuous functions \cite{bs2} will give the required result. 
Following the ideas of paper \cite{np1}, we write System (\ref{18}) in the form: \begin{eqnarray}\label{22} &\mu(t,\varepsilon) y'(t) = \Big(\mathop{\rm Re}\Lambda(t)+i G(t)+ \Gamma (t,\sigma)+F(t,\sigma) &\nonumber \\ &\hspace{2cm}-\mu(t,\varepsilon) S^{-1}(t)S'(t)\Big) y(t), \quad t \neq t_k ,& \\ & \Delta y(t_k ) = \left(B_k +\hat \Gamma_k(\sigma)\right) y(t_k^+ ), \quad k \in \mathbb{N}\,,& \nonumber \end{eqnarray} where $ \Delta y(t_k ) = y(t_k^+)-y(t_k^-)$, $B_k= I-N_k^{-1}e^{\textstyle -i\,\mathop{\rm Arg}(D_k)}$, and $$ \hat \Gamma_k( \sigma)= N_k^{-1}e^{\textstyle -i\,\mbox{Arg}(D_k)} \Gamma_k(\sigma) \left( N_k e^{\textstyle i\,\mbox{Arg}(D_k)} +\Gamma_k(\sigma)\right)^{-1}. $$ From hypotheses {\bf H3}-{\bf H4} and (\ref{19}) we obtain, for a small $\sigma, $ the estimate \begin{equation}\label{23} |\hat \Gamma_k( \sigma)|\leq \frac{\sigma \gamma^2}{1-\gamma \sigma }.\end{equation} On the other hand, the fundamental matrix of system \begin{eqnarray*} &\mu(t,\varepsilon) w'(t) = \left(\mathop{\rm Re}\Lambda(t)+i G(t) \right) w(t), \quad t \neq t_k ,& \\ & \Delta w(t_k ) = B_k w(t_k^+ ), \quad k \in \mathbb{N}\, , \end{eqnarray*} coincides with the fundamental matrix $Z(t,\varepsilon)=Z(t)$ of the diagonal system \begin{eqnarray}\label{24} &\mu(t,\varepsilon) z'(t) = \left(\mathop{\rm Re}\Lambda(t)+i G(t) \right)z(t),\quad t \neq t_k,& \\ & z(t_{k}^+) = |N(C_k)|e^{\textstyle i\,\mbox{Arg}(D_k)} z(t_k ),\quad k \in \mathbb{N}\,,&\nonumber \end{eqnarray} which is equal to $Z(t):=\Phi(t) \Psi(t)$, where $$ \Psi(t)= \exp\left\{ \int_0^t \frac{ \mbox{Re}\Lambda (\tau) +i G(\tau)}{\mu(\tau,\varepsilon) } d \tau \right\}, \quad \Phi(t)= \displaystyle \prod_{[0,t)} |N(C_k)|e^{\textstyle i\,\mathop{\rm Arg}(D_k)} . $$ For the projection matrix $P=\mathop{\rm diag} \{\overbrace{1,1,\dots,1}^{m},0, \dots,0\}$, the function $\Phi$ satisfies the following estimates: $$ \|\Phi (t)P \|\leq \exp\Big\{\displaystyle \sum_{[0,t)} \ln |\mu_m(k)| \Big\}. 
$$ From definition (\ref{4}), we may write \begin{eqnarray*} &\|\Phi (t)P \|\leq L\exp\big\{\displaystyle \int_0^t u_m(\tau) d\tau \big\},&\\ &\|\Phi^{-1} (t)(I-P) \|\leq L\exp\big\{\displaystyle \int_t^0 u_{m+1}(\tau) d\tau \big\},& \end{eqnarray*} where $L$ is a constant depending on the condition {\bf H3} only. Since $\Phi(t)$ and $\Psi(t) $ commute with $P$, then for $t \geq s$ we obtain the following estimates \begin{eqnarray} \label{25} &\|Z(t)PZ^{-1}(s)\| \leq L_1 \exp \big\{\displaystyle \int_s^t \big( \frac{ \mbox{Re}\lambda_m } {\mu(\cdot,\varepsilon)}+u_m \big)(\tau) d\tau \big\},& \\ &\|Z(s)(I-P)Z^{-1}(t)\| \leq L_1 \exp\big\{\displaystyle \int_t^s \big(\frac{ \mbox{Re}\lambda_{m+1}} {\mu(\cdot,\varepsilon)}+u_{m+1}\big)(\tau) d\tau \big\}, \nonumber \end{eqnarray} where $L_1$ is a constant independent of $\sigma$ and $\varepsilon$. In the sequel $W(t, s)$ will denote the matrix: $ W(t,s)=Z(t)Z^{-1}(s). $ From (\ref{25}), for $t \geq s$, we have \begin{equation}\label{26} \|W(t,s)P\|\|W(s,t)(I-P)\| \leq L^2_1 \exp \big\{ \int_s^t \alpha_m(\tau, \varepsilon) d\tau \big\}. \end{equation} For a given matrix $C$, we write $ \{ C \}_1=PCP+(I-P)C(I-P). $ \begin{defi} By a splitting of System (\ref{22}), we mean the existence of a function $T:J \to V^{n \times n}$ with the following properties: {\flushleft \bf T1: } $T$ is continuously differentiable on each interval $J_k$, {\flushleft \bf T2: } For each impulsive time $t_k$, there exists the right hand side limit $ T(t_k^+), $ {\flushleft \bf T3: } $T(t)$ is invertible for each $t\in J_k$. 
$T(t_k^{+})$ are invertible for all $k$, {\flushleft \bf T4: } The functions $T$ and $T^{-1}$ are bounded, {\flushleft \bf T5: } The change of variables $y(t)=T(t)z(t)$ reduces System (\ref{22}) to \begin{eqnarray}\label{27} &\mu(t,\varepsilon) z'(t) = \Big(\mathop{\rm Re}\Lambda(t)+i G(t) +\{\left( \Gamma (t,\sigma)+ F(t,\sigma)\right) T(t)\}_1 & \nonumber \\ &\hspace{2cm} - \mu(t,\varepsilon)\{S^{-1}(t)S'(t)T(t)\}_1 \Big)z(t), \quad t \neq t_k ,& \\ & \Delta z(t_k ) = \left(B_k+\{ \hat \Gamma_k(\sigma)\}_1 \right) z(t_k^+ ), \quad k \in \mathbb{N}\, .&\nonumber \end{eqnarray} \end{defi} For ordinary differential equations, problem {\bf T1-T5} was solved in \cite{co}. For difference equations, it was solved in \cite{pap}. The problem of splitting for impulsive equations is treated in \cite{np1}. None of the cited works study the splitting of system (\ref{22}), where the unbounded coefficient $\{S^{-1}(t)S'(t)\}_1 $ appears. Following the general setting of \cite{co,pap,np1}, we will seek a function $T$ in the form $T(t)=I+H(t)$, where $H \in BC(\{t_k\})$, $\|H\|_\infty \leq 1/2$, such that $T$ satisfies conditions {\bf T1-T5}. In the following we use the notations $$ H_k=H(t_k), \; H_k^+=H(t_k^+). 
$$ Let us consider the following operators: The operator of continuous splitting \begin{eqnarray*} {\cal O}(H)(t) & = &\displaystyle \int_{t_0}^t \frac{1}{\mu(s,\varepsilon)}W(t,s)P(I-H(s))( \Gamma (s,\sigma)\\[3pt] && + F(s,\sigma))( I+H(s)) (I-P)W(s,t)ds \\[3pt] && - \displaystyle \int_{t}^\infty \frac{1}{\mu(s,\varepsilon)} W(t,s)(I-P)(I-H(s))( \Gamma (s,\sigma) \\[3pt] && + F(s,\sigma))( I+H(s)) PW(s,t)\,ds\,; \end{eqnarray*} the operator of discrete splitting \begin{eqnarray*} {\cal D}(H)(t) & = & \displaystyle \sum_{[t_0,t)}W(t,t_k)P(I-H_k)\tilde \Gamma_k(\sigma) ( I+H_k^+) (I-P)W(t_k^+,t) \\[3pt] &&- \displaystyle \sum_{[t,\infty)}W(t,t_k)(I-P)(I-H_k)\tilde \Gamma_k(\sigma) ( I+H_k^+) PW(t_k^+,t)\,; \end{eqnarray*} and the operator of impulsive splitting \begin{eqnarray*} \lefteqn{{\cal S}(H)(t)}\\ & = & - \int_{t_0}^t W(t,s)P(I-H(s)) S^{-1}(s)S'(s) ( I+H(s)) (I-P)W(s,t)ds \\ && + \int_{t}^\infty W(t,s)(I-P)(I-H(s))S^{-1}(s)S'(s) ( I+H(s)) PW(s,t)ds\,. \end{eqnarray*} \begin{lemma} Uniformly with respect to $t_0 \in J$, for some constant $L_2$ depending neither on $\sigma$ nor on $\varepsilon$, we have the following estimates \begin{equation}\label{28} \|{\cal O}(H)\|_\infty \leq L_2( \sigma + K(\sigma ) \nu ), \end{equation} and \begin{equation}\label{29} \| {\cal D}(H)(t)\|_\infty\leq L_2 \sigma. \end{equation} \end{lemma} {\flushleft \bf Proof.} From condition {\bf H5} and (\ref{26}) we have the estimate \begin{eqnarray*} \|{\cal O}(H)(t)\| &\leq& \int_{t_0}^t \frac{9 L_1^2}{4 \mu(s,\varepsilon)} \exp \big\{ \int_s^t \alpha_m(\tau, \varepsilon) d\tau \big\} \left( \|\Gamma (s,\sigma)\| + \| F(s,\sigma)\|\right)ds\\ && + \int_{t}^\infty \frac{9 L_1^2}{4 \mu(s,\varepsilon)} \exp \big\{ \int_t^s \alpha_m(\tau, \varepsilon) d\tau \big\} \left( \|\Gamma (s,\sigma)\| + \| F(s,\sigma)\|\right) ds \\ &\leq& \frac{9 L_1^2}{4}\left(\sigma \|U_m(\cdot,\varepsilon)\|_\infty + \int_{t_0}^\infty \frac{ \| F(s,\sigma)\|}{ \mu(s,\varepsilon)}ds \right). 
\end{eqnarray*} Now the estimate (\ref{28}) follows from (\ref{19}) and {\bf H5}, for some constant $L_2$. For a fixed $t>0$, let us consider the impulsive times divided as follows: $$ t_1<t_2<\cdots<t_h\leq t<t_{h+1}<\cdots\,. $$ For a given $\alpha >0$, if $\|\mu(\cdot, \varepsilon)\|_\infty $ is small enough, we will have \begin{equation}\label{32} \|{\cal S}(H)(t)\| \leq \alpha \,. \end{equation} \begin{theo} The conditions {\bf H1-H5} imply, for small values of the norm $\|\mu(\cdot, \varepsilon)\|_\infty$, the existence of a function $T:[t_0, \infty) \to V^{n \times n}$ satisfying {\bf T1-T5}. Moreover $\|T\|\leq \frac{3}{2}$, $\|T^{-1}\|\leq 2$. \end{theo} {\flushleft \bf Proof.} According to Lemma 4 and Lemma 5, the operator ${\cal T}={\cal O}+{\cal D}+{\cal S}$, for small values of $\sigma$, $\nu $ and $\alpha $ (see (\ref{32})), satisfies $$ {\cal T}: \{H \in BC(\{t_k\}): \| H \|_\infty \leq 1/2 \} \to \{H \in BC(\{t_k\}): \|H\|_\infty \leq 1/2 \}. $$ Also, for small values of $\sigma$, $\nu $ and $\alpha $ this operator is a contraction. This and further details of this theory are well known for exponential dichotomies. The corresponding result for the dichotomy (\ref{25}) is similar \cite{co,pap,np2}. \hfill$\diamondsuit$\smallskip Once we have split (\ref{18}), we write System (\ref{27}) in the form \begin{eqnarray}\label{33} &\mu(t,\varepsilon) z'(t) = \Big(\mathop{\rm Re}\Lambda(t)+i G(t) +\{\left( \Gamma (t,\sigma)+ F(t,\sigma)\right) T(t)\}_1&\nonumber \\ & \hspace{2cm}-\mu(t,\varepsilon)\{S^{-1}(t)S'(t)T(t)\}_1 \Big)z(t),\quad t \neq t_k ,&\\ & z(t_k^+ ) = \left( N_ke^{\textstyle i\,\mbox{Arg}(D_k)}+ \{ G_k(\sigma)\}_1 \right) z(t_k), \quad k \in \mathbb{N}\,,& \nonumber \end{eqnarray} where $$ G_k(\sigma)=\left( I-N_ke^{\textstyle i\, \mbox{Arg}(D_k)}\{\hat \Gamma_k(\sigma)\}_1 \right)^{-1} N_ke^{\textstyle i\,\mbox{Arg}(D_k)} -N_ke^{\textstyle i\,\mbox{Arg}(D_k)}. 
$$ From (\ref{23}) we obtain \begin{equation}\label{34} \|G_k(\sigma)\| \leq L_3 \sigma ,\quad L_3=2\|\{C_k\}\|_\infty,\quad \mbox{if }0 < 2 \sigma < \|\{C_k\}\|_\infty^{-1}. \end{equation} The right hand side equation of (\ref{33}) commutes with projection $P$. Therefore, (\ref{33}) may be written as two systems of dimensions $m$ and $n-m$, \begin{eqnarray}\label{35} &\mu(t,\varepsilon) z'_j(t) = \Big(\mathop{\rm Re}\Lambda_j(t)+i G_j(t) + \Gamma_j (t,\sigma)+ F_j(t,\sigma)& \nonumber\\ &\hspace{2cm} +\mu(t,\varepsilon)V_j(t) \Big)z_j(t), \quad t \neq t_k ,& \\ &z_j(t_k^+ ) = \left( N_{k,j}e^{\textstyle i\,\mbox{Arg}(D_{k,j})}+ G_{k,j}(\sigma)\right) z_j(t_k), \quad k \in \mathbb{N}\,,& \nonumber \end{eqnarray} where $j=1,2$. The matrices $\Lambda_1(t)$, $\Lambda_2(t)$ are defined by $$ \Lambda_1(t)= \mathop{\rm diag}\{\lambda_1(t), \lambda_2(t), \ldots, \lambda_m (t)\}, \; \Lambda_2(t)= \mathop{\rm diag}\{\lambda_{m+1}(t), \lambda_{m+2}(t), \ldots, \lambda_n (t) \}, $$ and similarly the diagonal matrices $G_j(t)$, $N_{k,j}$ and $D_{k,j}$ are defined. The matrices $G_{k,j}(\sigma)$ satisfy estimate (\ref{34}). $\Gamma_j (t,\sigma)$ has the estimate (\ref{19}), where instead of $\sigma$ it is necessary to write $3\sigma$, $F_j(t,\sigma)$ has the estimate (\ref{20}) and $$\|\int_s^tV_j(\tau)d\tau \| \leq 3 \|\int_s^tS^{-1}(\tau)S'(\tau)d\tau \|\leq 3L(\delta,p) K(\sigma )(t-s),\quad t\geq s. 
$$ The Gronwall inequality for piecewise continuous functions \cite{bs2} gives the following estimates for $Z_j(t)$, the fundamental matrices of systems (\ref{35}), $j=1,2$: $$\displaylines{ \|Z_1(t)Z_1^{-1}(s)\| \leq L\exp \big\{\int_s^t \mu_1(\tau, \varepsilon)d\tau\big\}, \quad s\leq t, \cr \|Z_2(t)Z_2^{-1}(s)\| \leq L\exp \big\{\int_s^t \mu_2(\tau,\varepsilon)d\tau \big\}, \quad t \leq s, }$$ where $L$ is a constant depending neither on $\varepsilon$ nor on $\sigma$, and $$\displaylines{ \mu_1(t,\varepsilon ) = \frac{Re(\lambda_m(t))}{\mu(t,\varepsilon)}+u_m(t)+ L_4 \sigma +3 L(\delta,p)K(\sigma), \cr \mu_2(t,\varepsilon ) = \frac{Re(\lambda_{m+1}(t))}{\mu(t,\varepsilon)}+u_{m+1}(t)+ L_4 \sigma + 3 L(\delta,p)K(\sigma ), } $$ with a constant $L_4=3+L_3$. Since the decoupled system (\ref{35}) is kinematically similar to System (\ref{1}), we obtain for this system the following \begin{theo} If the hypotheses {\bf H1-H5} are fulfilled, then for a small value of $\|\mu(\cdot, \varepsilon)\| $ the System (\ref{1}) has the following $(\mu_1,\mu_2)$-dichotomy: \begin{eqnarray}\label{36} &\|X(t,\varepsilon)P X^{-1}(s,\varepsilon) \| \leq L\exp \big\{\displaystyle\int_s^t \mu_1(\tau, \varepsilon)d\tau\big\}, \quad s\leq t, &\\ &\|X(t,\varepsilon)(I-P)X^{-1}(s,\varepsilon)\| \leq L\exp \big\{ \displaystyle \int_s^t \mu_2(\tau,\varepsilon) d\tau \big\}, \quad t \leq s\,, \nonumber \end{eqnarray} where $L$ is a constant independent of $\varepsilon$ and $\sigma$. \end{theo} \section{Dichotomies for linear differential systems } In this section we present some applications of formulas (\ref{36}). \subsection*{The case $\|\mu(\cdot ,\varepsilon)\|_\infty \leq \varepsilon$} \begin{theo} Under conditions {\bf H1-H5}, if $\|\mu(\cdot ,\varepsilon)\| \leq \varepsilon$, $\varepsilon \in (0,\infty)$, then there exists a positive number $\varepsilon_0$ such that for each $\varepsilon \in (0, \varepsilon_0 )$, the impulsive system (\ref{1}) has the dichotomy (\ref{36}). 
\end{theo} In the particular case $\mu(t,\varepsilon)=\varepsilon$, we obtain the system \begin{eqnarray}\label{37} & \varepsilon x'(t) = A(t)x(t), \quad t \neq t_k ,\quad J=[0,\infty), & \\ &x(t_k^+ ) = C_k x(t_k^- ),\quad k \in \mathbb{N}=\{1,2,3,\ldots\}, \nonumber \end{eqnarray} and the dichotomy (\ref{36}) has the form $$ \begin{array}{rcl} \mu_1(t,\varepsilon) & = & \displaystyle \frac{Re(\lambda_m(t))+\varepsilon u_m(t)+L_4\varepsilon \sigma + 3\varepsilon L(\delta,p)K(\sigma)}{\varepsilon}\,, \\[3pt] \mu_2(t,\varepsilon) & = & \displaystyle \frac{Re(\lambda_{m+1}(t))+ \varepsilon u_{m+1}(t) + L_4 \varepsilon \sigma + 3\varepsilon L(\delta,p)K(\sigma) }{\varepsilon}\,. \end{array} $$ Considering in (\ref{37}) $C_k=I$ for $k \in \mathbb{N}$, we obtain that the solutions of this system coincide with the solutions of the ordinary system with a small positive parameter multiplying the derivative \begin{equation}\label{38} \varepsilon y'(t)=A(t)y(t). \end{equation} Denoting by $Y (t,\varepsilon) $ the fundamental matrix of System (\ref{38}), from (\ref{36}) we obtain the dichotomy $$\displaylines{ \|Y(t,\varepsilon )PY^{-1}(s,\varepsilon)\| \leq K\exp \big\{ \int_s^t \mu_1(\tau, \varepsilon) d\tau \big\}, \quad s\leq t, \cr \|Y(t,\varepsilon)(I-P)Y^{-1}(s,\varepsilon)\| \leq K\exp \big\{- \int_t^s \mu_2(\tau,\varepsilon)\, d\tau \big\}, \quad t \leq s, }$$ where $$\displaylines{ \mu_1(t,\varepsilon) = \frac{Re(\lambda_m(t))+L_4\varepsilon \sigma + \varepsilon L(\delta,0)K(\sigma)}{\varepsilon}, \cr \mu_2(t,\varepsilon) = \frac{Re(\lambda_{m+1}(t)) + L_4 \varepsilon \sigma + 3\varepsilon L(\delta,0)K(\sigma) }{\varepsilon}. 
}$$ If $Re(\lambda_m(t))\leq -\alpha <0$ and $Re(\lambda_{m+1}(t))\geq \beta >0 $, for all values of $t$, for a small $\varepsilon_0$, we obtain for (\ref{38}) the dichotomy $$\displaylines{ \|Y(t,\varepsilon)PY^{-1}(s,\varepsilon)\| \leq L\exp \left\{-\frac{ \alpha}{2 \varepsilon } (t-s) \right\}, \quad s\leq t, \cr \|Y(t,\varepsilon)(I-P)Y^{-1}(s,\varepsilon)\| \leq L\exp \big\{\frac{\beta }{2 \varepsilon }(t-s) \big\}, \quad t \leq s, }$$ for $\varepsilon \in (0, \varepsilon _0]$ and $L$ is independent of $\varepsilon$. This dichotomy was obtained by Chang \cite{chn} for almost periodic systems and by Mitropolskii-Lykova \cite{ml} for a system (\ref{38}) whose function $A(t)$ is uniformly continuous on $J$. \subsection*{The case $\mu(t,\varepsilon)=\mu(t) \to 0, \; \mbox{ if } t \to \infty$} In this case the condition $\lim_{t \rightarrow \infty} \mu(t)=0$ allows us to obtain a small value of $|\mu(t, \varepsilon)|$ if we consider $t \in [t_0, \infty)$. All the reasoning leading to Theorem 2 can be accomplished on the interval $[t_0,\infty)$ instead of $[0,\infty)$. \begin{theo} If we assume valid {\bf H1-H5}, where $U(t,\varepsilon)$ is defined with $$ \alpha_m(t,\varepsilon)=\frac{\lambda_m(t)-\lambda_{m+1}(t)} {\mu(t)}+u_m(t)-u_{m+1}(t), $$ (therefore $U(t,\varepsilon)$ does not depend on $\varepsilon$), then the impulsive system $$\displaylines{ \mu(t) x'(t) = A(t)x(t), \quad t \neq t_k ,\; J=[0,\infty) \cr x(t_k^+ ) = C_k x(t_k^- ),\quad k \in \mathbb{N}=\{1,2,3,\dots\}\,, }$$ has the dichotomy \begin{eqnarray*} &\|X(t)PX^{-1}(s)\| \leq K\exp \left\{\int_s^t \mu_1(\tau) d\tau \right\}, \quad s\leq t, &\\ &\|X(t)(I-P)X^{-1}(s)\| \leq K\exp \left\{\int_s^t \mu_2(\tau)\, d\tau \right\}, \quad t \leq s\,, \end{eqnarray*} where $$\displaylines{ \mu_1(t) = \frac{Re(\lambda_m(t))+L_4\mu(t) \sigma + \mu(t) L(\delta,0)K(\sigma)}{\mu(t)},\cr \mu_2(t) = \frac{Re(\lambda_{m+1}(t))+ L_4 \sigma \mu(t) + 3\mu(t) L(\delta,0)K(\sigma) }{\mu(t)}. 
}$$ \end{theo} As an application of the above formula let us consider the ordinary system \begin{equation}\label{39} \mu(t) x'(t)=A(t)x(t),\; \lim_{t \rightarrow \infty} \mu(t)=0.\end{equation} \begin{theo} If $A(\cdot)$ satisfies {\bf H1} and the function $U_m(t)$ defined in {\bf H5} with $$ \alpha_m(t,\varepsilon)=\frac{\lambda_m(t)-\lambda_{m+1}(t)}{\mu(t)}, $$ is bounded, then system (\ref{39}) has the dichotomy (\ref{36}), where $$\displaylines{ \mu_1(t) = \frac{Re(\lambda_m(t))+3\sigma \mu(t) + \mu(t) L(\delta,0)K(\sigma)}{\mu(t)}, \cr \mu_2(t) = \frac{Re(\lambda_{m+1}(t))- 3\sigma \mu(t) - 3\mu(t) L(\delta,0)K(\sigma) }{\mu(t)}. }$$ \end{theo} The above theorem gives conditions for the existence of a $(\mu_1,\mu_2)$-dichotomy for (\ref{39}) with an unbounded function $\mu(t)^{-1}A(t)$. These systems have been studied in \cite{np3}. \begin{thebibliography}{00} \bibitem{be} Bellman, R.: {\it Stability Theory of Differential Equations}, Dover Publications, New York (1953). \bibitem{bs1} Bainov, D.D.; Simeonov, P.S.: {\it Systems with impulse effect (Stability Theory and Applications)}, Ellis Horwood \& John Wiley, New York (1989). \bibitem{bs2} Bainov, D.D.; Simeonov, P.S.: {\it Integral Inequalities and Applications}, Kluwer Academic Publishers, Dordrecht (1992). \bibitem{bs} Simeonov, P.S.; Bainov, D.D.: Criteria for dichotomies of linear impulsive differential equations, {\it Tamkang J. Math.}, 25, 2, 101-112 (1994). \bibitem{con} Conway, J.B.: {\it Functions of One Complex Variable}, Springer, Student Edition, New York (1973). \bibitem{co} Coppel, W.A.: {\it Dichotomies in Stability Theory}, Lecture Notes in Mathematics, 629, Springer Verlag (1978). \bibitem{chn} Chang, K.W.: Almost periodic solutions of singularly perturbed systems of differential equations, {\it J. Diff. Eqns. } 4, 300-307 (1968). \bibitem{lbs} Lakshmikantham, V.; Bainov, D.D.; Simeonov, P.S.: {\it Theory of Impulsive Differential Equations}, World Scientific (1989). 
\bibitem{lm} Lizana, M.; Muldowney, J.S.: Existence and roughness of $(\mu_1, \mu_2)$-dichotomies, preprint (1997). \bibitem{ml} Mitropolskii, Y.A.; Lykova, O.B.: {\it Integral Manifolds in Nonlinear Mechanics}, (Russian) Nauka, Moscow (1973). \bibitem{np1} Naulin, R.; Pinto, M.: Splitting of linear differential systems with impulsive effect, to appear in {\it Rocky Mountain J. Math.} (1999). \bibitem{np2} Naulin, R.; Pinto, M.: Quasi-diagonalization of linear impulsive systems and applications, {\it J. Math. Anal. Appl.,} Vol. 208, 281-297 (1997). \bibitem{np3} Naulin, R.; Pinto, M.: Dichotomies for differential systems with unbounded coefficients, {\it Dyn. Syst. Appl. }, 3, 333-348 (1994). \bibitem{pap} Papashinopoulos, G.: Some roughness results concerning reducibility for linear difference equations, {\it Internat. J. Math.} \& {\it Math. Sci. }, Vol. 11, N$^o$ 4, 793-804 (1988). \bibitem{sm} Smith, D.R.: {\it Singular Perturbation Theory}, Cambridge University Press (1985). \end{thebibliography} \noindent{\sc Ra\'ul Naulin }\\ Departamento de Matem\'aticas, Universidad de Oriente \\ Apartado 245, Cuman\'a 6101-A, Venezuela \\ e-mail: rnaulin@cumana.sucre.udo.edu.ve \end{document}