function DEM = ADEM_eyeblink(OPTION)
% Simulation of eyeblink conditioning
% FORMAT DEM = ADEM_eyeblink(OPTION)
%
% OPTION:
% case{'EYEBLINK'}   : spontaneous eye blinking
% case{'AIRPUFF'}    : unconditioned eyeblink response to air puff
% case{'STARTLE'}    : unconditioned startle response to a sound
% case{'TRACE'}      : trace conditioning to the sound
% case{'DELAY'}      : delay conditioning to the sound
% case{'EXTINCTION'} : extinction of trace conditioning to sound
%
%__________________________________________________________________________
%
% This demonstration routine illustrates Pavlovian learning under active
% inference. It uses the eyeblink conditioning paradigm to model startle
% responses and the subsequent acquisition of an eyeblink - under delay and
% trace learning. The various options above determine the nature of the
% simulation (or edit the OPTION below). The generative model, in this
% example, starts with a heteroclinic cycle with an inset. The cycle per se
% generates autonomous eyeblinks periodically, while the inset is
% activated by a conditioned stimulus (CS). The subsequent unstable fixed
% points play the role of an echo-state and enable the learning of an
% association of a high-level hidden cause with subsequent unconditioned
% responses (UR).
%
% In active inference, an unconditioned response corresponds to a prior
% belief that a hidden state will generate action and the unconditioned
% stimulus (US). Pavlovian conditioning is the learning of the association
% between a conditioned stimulus (CS) and the unconditioned response. The
% dynamics entailed by the heteroclinic cycle enable trace conditioning,
% which may be related to hippocampal function.
%
% In this example, there are two levels with the hidden states at the first
% level modelling beliefs about eyeblinks, unconditioned responses and
% unconditioned stimuli. Proprioceptive predictions are generated by
% beliefs about ensuing eyeblinks and unconditioned responses (which
% also predict the conditioned stimulus). Hidden states at the second level
% embody a sense of time through Lotka-Volterra dynamics. Successive epochs
% of time are passed to the first level via a softmax transform. Learning
% corresponds to Hebbian plasticity (that minimises free energy) in the
% connections between the state unit encoding expectations about a UR and
% expectations about the CS (for delay conditioning) and heteroclinic
% states (for trace conditioning): see the functions at the end of this
% routine.
%__________________________________________________________________________
% Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging

% Karl Friston
% $Id: ADEM_eyeblink.m 6655 2015-12-23 20:21:27Z karl $


% paradigm and stimuli
%==========================================================================

% options (default to an unconditioned startle simulation):
%--------------------------------------------------------------------------
if ~nargin, OPTION = 'STARTLE'; end

% default (pre-learning) connection strengths and precisions; P becomes the
% first-level prior expectation M(1).pE below
%--------------------------------------------------------------------------
P.x   = 0;                                    % connection strengths (CS state -> UR)
P.v   = [0 0 0];                              % connection strengths (heteroclinic epochs -> UR)
PC    = 0;                                    % prior covariance (0 disables plasticity)
V     = [8 4 0];                              % sensory (log) precision per modality

% the stimulus timeseries C has one column per cause: C(:,1) is the CS
% (loud sound) and C(:,2) the US (air puff); both are Gaussian bumps in
% time (or flat zero vectors when the stimulus is absent)
%--------------------------------------------------------------------------
switch OPTION

    case{'EYEBLINK'}                          % spontaneous eye blinking
        N      = 256;                         % number of time bins
        NT     = 1;                           % number of trials
        C(:,1) = sparse(1,N);                 % CS: loud sound (absent)
        C(:,2) = sparse(1,N);                 % US: air puff   (absent)

    case{'AIRPUFF'}                           % unconditioned response to US
        N      = 256;                         % number of time bins
        NT     = 1;                           % number of trials
        CS     = 128;                         % onset of CS (bins)
        C(:,1) = sparse(1,N);                 % CS: loud sound (absent)
        C(:,2) = exp(-((1:N) - CS).^2/(2*4^2));        % US: air puff

    case{'STARTLE'}                           % startle response to CS
        N      = 256;                         % number of time bins
        NT     = 1;                           % number of trials
        CS     = 128;                         % onset of CS (bins)
        C(:,1) = exp(-((1:N) - CS).^2/(2*4^2));        % CS: loud sound
        C(:,2) = sparse(1,N);                 % US: air puff   (absent)

    case{'DELAY'}                             % delay learning of association
        N      = 64;                          % number of time bins
        NT     = 16;                          % number of trials
        CS     = 48;                          % onset of CS (bins)
        C(:,1) = exp(-((1:N) - CS).^2/(2*8^2));        % CS: loud sound
        C(:,2) = exp(-((1:N) - CS - 4).^2/(2*4^2));    % US: 4 bins after CS
        PC     = 1;                           % enable learning

    case{'TRACE'}                             % trace learning of association (over time)
        N      = 128;                         % number of time bins
        NT     = 32;                          % number of trials
        CS     = 48;                          % onset of CS (bins)
        C(:,1) = exp(-((1:N) - CS).^2/(2*4^2));        % CS: loud sound
        C(:,2) = exp(-((1:N) - CS - 32).^2/(2*4^2));   % US: 32 bins after CS
        PC     = 1;                           % enable learning

    case{'EXTINCTION'}                        % normal extinction
        N      = 128;                         % number of time bins
        NT     = 8;                           % number of trials
        CS     = 48;                          % onset of CS (bins)
        C(:,1) = exp(-((1:N) - CS).^2/(2*4^2));        % CS: loud sound
        C(:,2) = sparse(1,N);                 % US: air puff (omitted)

        % start from post-(trace)-learning connection strengths
        %------------------------------------------------------------------
        P.x    = 0;                           % connection strengths
        P.v    = [0 0 0.6];                   % connection strengths
        PC     = 1;                           % enable learning
        V      = [8 0 0];                     % picrotoxin lesion (overwritten below)
        V      = [8 2 0];                     % sensory precision

    otherwise
end


% generative process
%==========================================================================

% embedding orders, smoothness and iteration numbers for spm_ADEM
%--------------------------------------------------------------------------
M(1).E.n  = 2;
M(1).E.d  = 2;
M(1).E.s  = 1;
M(1).E.nN = 2;
M(1).E.nE = 1;

% level 1: the world maps action directly to proprioception (see G1g)
%--------------------------------------------------------------------------
G(1).g  = @(x,v,a,P) G1g(x,v,a,P);
G(1).V  = exp(16);                            % error precision
G(1).U  = exp(4);                             % motor gain

% level 2: exogenous stimuli and action
%--------------------------------------------------------------------------
G(2).v  = [0;0];                              % stimulus (CS and US)
G(2).a  = 0;                                  % action
G(2).V  = exp(16);


% generative model
%==========================================================================

% hidden states: expectations about the CS, the UR and eyeblinks (EB)
%--------------------------------------------------------------------------
x.CS = 0;
x.UR = 0;
x.EB = 0;

% level 1
% NB: the first (lesion) assignment to M(1).W is deliberately overwritten
% by the second; it is left in place as an optional manipulation
%--------------------------------------------------------------------------
M(1).x  = x;
M(1).f  = @(x,v,P) M1f(x,v,P);
M(1).g  = @(x,v,P) M1g(x,v,P);
M(1).pE = P;                                  % prior expectation (plastic connections)
M(1).pC = PC;                                 % prior covariance  (0 = no learning)
M(1).W  = diag(exp([4 0 4]));                 % cerebellar (IPN) lesion (overwritten below)
M(1).W  = exp(4);                             % error precision
M(1).V  = exp(V);                             % sensory attenuation

% for use with spm_ALAP (for state-dependent sensory attenuation); unused
% by the spm_ADEM call below
%--------------------------------------------------------------------------
M(1).E.method.x = 0;
M(1).ph = @(x,v,h,M) [8 4 4] - 2*(x.UR + x.EB);        % sensory attenuation

% level 2: Lotka-Volterra (heteroclinic cycle) coupling matrix encoding a
% sequence of epochs; its softmax output drives level 1
%--------------------------------------------------------------------------
P     = [ +1   -0.5  0.2  0.2  0.2  0.2;
          +0.5  1   -0.5  0    0    0  ;
          -0.5  0.5  1   -0.5  0    0  ;
          -0.5  0    0.5  1    0    0.5;
          -0.5  0    0    0.5  1   -0.5;
          -0.5  0    0   -0.5  0.5  1  ] - 1;

% NB: the first (lesion) assignment to M(2).V is deliberately overwritten
%--------------------------------------------------------------------------
M(2).x  = [0 0 0 4 0 0]' - 4;
M(2).f  = @(x,v,P) spm_lotka_volterra(x,v,P);
M(2).g  = @(x,v,P) spm_softmax(x);
M(2).pE = P;
M(2).V  = exp(0);                             % hippocampal lesion (overwritten below)
M(2).V  = exp(4);                             % error precision
M(2).W  = exp(4);                             % error precision

% level 3
%--------------------------------------------------------------------------
M(3).v  = 0;                                  % inputs (null)
M(3).V  = exp(16);


% ADEM: iterate active inference over trials, updating the (plastic) prior
% expectations M(1).pE after each trial and tracking the conditioned
% response (CR)
%==========================================================================
DEM.U  = sparse(N,1);
DEM.C  = C;
DEM.G  = G;
DEM.M  = M;
DEM.db = 0;

for i = 1:NT

    % integrate active inference scheme
    %----------------------------------------------------------------------
    DEM = spm_ADEM(DEM);
    if i == 1
        spm_figure('GetWin','Before learning');
    else
        spm_figure('GetWin','After learning');
    end
    spm_DEM_qU(DEM.qU,DEM.pU);

    % single-trial options stop here (NB: CS is undefined for 'EYEBLINK',
    % but that case has NT == 1, so the code below is never reached)
    %----------------------------------------------------------------------
    if NT == 1, break, end

    % latency and vigour of CR: centre of mass (latency) and peak (vigour)
    % of the action trace up to 32 bins after CS onset
    %----------------------------------------------------------------------
    a = DEM.qU.a{2};
    a = abs(a(1:min(end,(CS + 32))));
    if max(a) > 1/4;
        q = sum((1:length(a)).*a/sum(a));
    else
        q = NaN;
    end
    CR(1,i) = q;                              % latency
    CR(2,i) = max(a);                         % vigour

    % Bayesian belief update: carry the posterior over parameters forward
    % as the prior for the next trial (Hebbian plasticity)
    %----------------------------------------------------------------------
    p            = DEM.qP.P{1};
    DEM.M(1).pE  = p;
    qP(:,i)      = spm_vec(p);

    % plot learning curves and connection strengths over trials
    %----------------------------------------------------------------------
    spm_figure('GetWin','Figure 1'); clf

    subplot(2,2,1),cla
    plot((1:i),CR(1,:) - CR(1,1)), hold on
    plot((1:i),qP',':b'), hold on
    plot((1:i),CR(2,:),'--'), hold off
    set(gca,'XLim',[0 (NT + 1)])
    title('latency and vigour','FontSize',16);
    xlabel('trials')
    ylabel('response')
    axis square

    subplot(2,2,2),cla
    imagesc(qP)
    title('plasticity','FontSize',16);
    set(gca,'XLim',[0.5 (NT + .5)])
    xlabel('trials')
    ylabel('connection')
    axis square

end


% functions of generative process and model (at the first level)
%==========================================================================

function s = G1g(x,v,a,P)
% sensory process: map exogenous causes and action to sensations
s.CS = v(1);                                  % CS: amplitude of sound
s.US = v(2);                                  % US: air puff
s.R  = a;                                     % proprioception (from action)

function s = M1g(x,v,P)
% sensory prediction: map hidden states to predicted sensations
s.CS = x.CS;                                  % prediction of CS
s.US = x.UR;                                  % prediction of US
s.R  = x.UR + x.EB;                           % prediction of proprioception

function f = M1f(x,v,P)
% dynamics of hidden states; v holds the softmax output of level 2 and the
% plastic parameters P.v (epochs -> UR) and P.x (CS state -> UR) mediate
% conditioning
f.CS = v(1) - x.CS;                           % dynamics of CS
f.UR = P.v*v(1:3) + P.x*x.CS - x.UR;          % dynamics of UR
f.EB = v(6) - x.EB;                           % dynamics of eyeblinks