# -*- coding: utf-8 -*-
r"""
Created on Mon Mar 25 16:18:52 2024

@author: G Turinici, april 2024

Goal: compute the gradient of
$J(\beta) = S(0) - S(T) + \int_0^T c(\beta(t))\, dt$
with respect to $\beta(t)$.
Note: we start first with the simpler functional $J(\beta) = S(0) - S(T)$,
where $S(\cdot)$ is the solution of the (normalized) SIR system
    S' = -\beta(t) S I / N
    I' =  \beta(t) S I / N - \gamma I
    R' =  \gamma I
with N = S + I + R the (constant) total population.
Here \beta is a given Python function of time.
"""
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt

T = 150          # final time
N = 150          # number of time steps
dt = T / N       # grid spacing
beta_cst = 1. / 2.
gamma_cst = 1. / 3.

# example of a beta function: constant in time
def constant_beta(t):
    return beta_cst

beta_function = constant_beta

# procedure: compute the gradient
def gradient(beta_f):
    r"""
    Input : a function beta_f (contact rate as a function of time)
    Output: the gradient of S(T) with respect to beta_f, as a function of t.

    Steps:
    1/ solve the direct (forward) SIR evolution and turn S, I into functions
       of time (beware: the adjoint solver may query times slightly outside
       the grid, hence the "extrapolate" fill value);
    2/ solve the adjoint system backward in time from t = T to t = 0;
    3/ assemble the gradient on the grid and return it as a function.
    Note: one can use scipy's interp1d subroutine; in all cases compute the
    values at the grid points and then output the interpolation function.
    """
    # Step 1: forward SIR solve
    def sir_f(t, x):
        r"""
        Right-hand side of the SIR system; x has shape (3,):
            S' = -\beta(t) S I / N
            I' =  \beta(t) S I / N - \gamma I
            R' =  \gamma I
        """
        S, I, R = x
        Nt = S + I + R
        return np.array([-beta_f(t) * S * I / Nt,
                         beta_f(t) * S * I / Nt - gamma_cst * I,
                         gamma_cst * I])

    Sinit, Iinit, Rinit = 1.e+6, 10., 0.
    Ntpop = Sinit + Iinit + Rinit
    X0 = np.array([Sinit, Iinit, Rinit])
    trange = np.linspace(0, T, N + 1, endpoint=True)
    odeint_sir = odeint(sir_f, X0, t=trange, tfirst=True)
    print(odeint_sir.shape)  # sanity check: (N+1, 3)
    Svalues, Ivalues, Rvalues = odeint_sir.T
    print(Svalues.shape)     # sanity check: (N+1,)
    S_function = interp1d(trange, Svalues, fill_value="extrapolate")
    I_function = interp1d(trange, Ivalues, fill_value="extrapolate")

    # Step 2: backward adjoint solve
    def sir_adjoint(t, x):
        r"""
        Adjoint system (lambda is adjoint to S, mu to I; the adjoint of R
        vanishes identically given its zero terminal condition):
            \lambda' = \beta(t) I (\lambda - \mu) / N
            \mu'     = \beta(t) S (\lambda - \mu) / N + \gamma \mu
        with terminal conditions \lambda(T) = 1, \mu(T) = 0.
        """
        l_adjoint, mu_adjoint = x
        return np.array([
            beta_f(t) * I_function(t) * (l_adjoint - mu_adjoint) / Ntpop,
            beta_f(t) * S_function(t) * (l_adjoint - mu_adjoint) / Ntpop
            + gamma_cst * mu_adjoint])

    # integrate backward by feeding odeint a decreasing time grid,
    # then flip the result back to increasing time
    l_mu_values = odeint(sir_adjoint, np.array([1., 0.]), t=trange[::-1],
                         tfirst=True)[::-1]
    Lvalues, Mvalues = l_mu_values.T

    # Step 3: the gradient of S(T) w.r.t. beta(t) is -S I (lambda - mu) / N;
    # return it as an interpolated function of time
    return interp1d(trange, -Svalues * Ivalues * (Lvalues - Mvalues) / Ntpop,
                    fill_value="extrapolate")

grad = gradient(beta_function)
trange = np.linspace(0, T, N + 1, endpoint=True)
plt.plot(trange, [grad(t) for t in trange], label="gradient, constant beta")

# test now another beta, obtained from the first one by a single
# (normalized) gradient-descent-like update
maxgrad = np.max(np.abs([grad(t) for t in trange]))

def beta2_function(t):
    return beta_cst - np.abs(grad(t)) / maxgrad / 4

plt.plot(trange, [beta2_function(t) for t in trange], label="updated beta")
grad2 = gradient(beta2_function)
plt.plot(trange, [grad2(t) for t in trange], label="gradient, updated beta")
plt.legend()
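
# --- Optional sanity check (added sketch, not part of the original script) ---
# The adjoint-based gradient can be validated by finite differences: perturb
# beta by a small rectangular bump of height eps and width `width` centered
# at t0; then (S_eps(T) - S(T)) / (eps * width) should approximate grad(t0).
# The helper final_S, the bump parameters and the test points below are
# illustrative choices, not prescribed by the original code.

def final_S(beta_f):
    """Integrate the SIR system forward and return S(T)."""
    def sir_rhs(t, x):
        S, I, R = x
        Nt = S + I + R
        return np.array([-beta_f(t) * S * I / Nt,
                         beta_f(t) * S * I / Nt - gamma_cst * I,
                         gamma_cst * I])
    X0 = np.array([1.e+6, 10., 0.])
    tgrid = np.linspace(0, T, N + 1, endpoint=True)
    # cap the internal step so the solver does not step over the bump
    return odeint(sir_rhs, X0, t=tgrid, tfirst=True, hmax=0.5)[-1, 0]

eps, width = 1.e-4, 5.0
S_ref = final_S(beta_function)
for t0 in [30., 75., 120.]:
    def beta_bumped(t, t0=t0):
        return beta_cst + eps * (np.abs(t - t0) < width / 2)
    fd = (final_S(beta_bumped) - S_ref) / (eps * width)
    print(f"t0={t0}: adjoint={float(grad(t0)):.4e}, finite diff={fd:.4e}")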
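
# --- Optional extension (added sketch, not part of the original script) ---
# The single beta2 update above can be iterated. Since gradient() returns
# dS(T)/dbeta and J(beta) = S(0) - S(T), the descent direction for J is
# +grad; as in the one-step update, the direction is normalized by its
# maximum absolute value. The step size eta, the iteration count and the
# bounds [beta_min, beta_max] below are illustrative choices.

beta_min, beta_max = 0.05, beta_cst
eta = 0.02
beta_values = np.full(N + 1, beta_cst)   # beta discretized on the time grid
for it in range(20):
    beta_k = interp1d(trange, beta_values, fill_value="extrapolate")
    g = gradient(beta_k)(trange)         # dS(T)/dbeta on the grid
    g = g / np.max(np.abs(g))            # normalized direction
    # descent on J: beta <- beta + eta * g, projected onto the bounds
    beta_values = np.clip(beta_values + eta * g, beta_min, beta_max)
plt.figure()
plt.plot(trange, beta_values, label="beta after 20 projected descent steps")
plt.legend()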