CAB.COR: Catania's Operant Reserve

Description Details References See Also Examples

Description

This is a stock implementation of Catania's Operant Reserve (COR; Catania, 2005). See the References section for a list of papers on COR.

Details

In brief, COR is a model where the probability of responding is controlled by a construct referred to as an 'operant reserve'. The emission of responses depletes the value of the reserve. Reinforcement replenishes the reserve based on where reinforcement has occurred within the most recent inter-reinforcement interval. The rule that controls the replenishment to the reserve is the "delay of reinforcement gradient". See Catania (2005) and Berg & McDowell (2011) for more details.

Previous implementations of COR simulated responding at each possible discrete time point. That is, the model is asked at each point in time whether or not a response occurred. Our implementation simulates inter-response times rather than times at which responses occurred by asking when the next response will occur. This allows us to save a lot of computational time.

See the Examples section below for a worked example.

References

Berg, J. P., & McDowell, J. J. (2011). Quantitative, steady-state properties of Catania's computational model of the operant reserve. Behavioural Processes, 87(1), 71-83. https://doi.org/10.1016/j.beproc.2011.01.006

Catania, A. C. (2005). The operant reserve: A computer simulation in (accelerated) real time. Behavioural Processes, 69(2), 257-278. https://doi.org/10.1016/j.beproc.2005.02.009

Li, D., Elliffe, D., & Hautus, M. J. (2017). Pre-Asymptotic Response Rates as a Function of the Delay-of-Reinforcement Gradient Summation for Catania's Operant Reserve: A Reply to Berg & McDowell (2011). Behavioural Processes. http://dx.doi.org/10.1016/j.beproc.2017.01.002

See Also

COR_helpers For functions associated with COR.

Examples

## Not run: 
# An example of our COR implementation.

# Build a constructor for COR models, naming the function slots that
# every COR model requires.
COR_maker <- model_constructor(
    "COR",
    c("DOR", "depletion", "initial_reserve", "rft_schedule", "emission", "rft_duration")
)

# Create a ragged event record with room for 5000 response times and
# 1000 reinforcement times.
my_events <- make.ragged_event_record(c("resp_time", "rft_time"), c(5000, 1000))

# The necessary functions, using the built-ins:
COR.linear_DOR          # delay-of-reinforcement gradient (DOR)
COR.constant_depletion  # depletion rule
COR.initial_reserve     # initial reserve value
COR.exponential_vi      # reinforcement schedule
COR.G_E_emission        # behaviour emission function
COR.rft_duration        # reinforcement duration
my_events               # event record

# Define the necessary parameters. Parameter names are arguments in the
# model functions.
COR_params <- list(
    DOR_max = 0.01, DOR_scale = 2000,
    IRI_resp_times = rep(NaN, 1000), IRI_resps = 0, reserve_value = NaN,
    depletion_constant = 0.001, initial_reserve_constant = 0.75,
    inter_rft_interval = 30,
    time = 0, min_irt = 0.1, food_duration = 2,
    session_duration = 100, rft_arranged = FALSE, rft_time = NaN
)

# Assemble the model:
COR_model <- COR_maker(
    organism_params = COR_params,
    DOR = COR.linear_DOR,
    depletion = COR.constant_depletion,
    initial_reserve = COR.initial_reserve,
    rft_schedule = COR.exponential_vi,
    emission = COR.G_E_emission,
    rft_duration = COR.rft_duration,
    event_record = my_events
)
# Make a function to run the model
# Run one simulated session of the COR model.
#
# Args:
#   COR_params: Named list of model parameters (see COR_params above).
#   COR_model:  A COR model object created by COR_maker.
#
# Returns a list of summary statistics computed from the event record:
# session rates, first and last response times, inter-response times
# (IRT), inter-reinforcement intervals (IRI), and post-reinforcement
# pauses (PRP).
COR.do <- function(COR_params, COR_model) {
    # Initialise: load parameters, set the starting reserve value, and
    # clear the event record from any previous run.
    set_param(COR_model, COR_params)
    initial_reserve_value <- model_do(COR_model, "initial_reserve")
    o_set(COR_model, "reserve_value", initial_reserve_value)
    reset_event(COR_model@event_record)

    repeat {
        # Arrange the next reinforcer if one is not already pending.
        if (!COR_model["rft_arranged"]) {
            rft_time <- model_do(COR_model, "rft_schedule")
            o_set(COR_model, "rft_time", rft_time)
            o_set(COR_model, "rft_arranged", TRUE)
        }

        # Ask the emission function when the next response occurs.
        response_time <- model_do(COR_model, "emission")

        # End of session, or no further response emitted. Use scalar,
        # short-circuiting '||' (was '|'): when response_time is NaN the
        # second comparison yields NA, and '||' never evaluates it.
        if (is.nan(response_time) || response_time > COR_model["session_duration"]) break

        # Record the response within the current inter-reinforcement interval.
        iri_resp <- COR_model["IRI_resps"] + 1
        o_set(COR_model, "IRI_resps", iri_resp)
        o_set(COR_model, "IRI_resp_times", i = iri_resp, response_time)
        assign_event(COR_model@event_record, "resp_time", "next", response_time)
        o_set(COR_model, "time", response_time)

        # Each emitted response depletes the reserve.
        post_depletion_reserve <- model_do(COR_model, "depletion")
        o_set(COR_model, "reserve_value", post_depletion_reserve)

        # Deliver reinforcement once the arranged reinforcer has been earned.
        # NOTE(review): 'rft_time' is the local set in the arranging block
        # above; it is always defined here because 'rft_arranged' starts
        # FALSE and is reset to FALSE after every reinforcer.
        if (response_time >= rft_time) {
            assign_event(COR_model@event_record, "rft_time", "next", response_time)
            # Replenish the reserve via the delay-of-reinforcement gradient.
            replenishment <- model_do(COR_model, "DOR")
            o_set(COR_model, "reserve_value", replenishment)
            # Advance the clock past the reinforcer and begin a new IRI.
            post_rft_time <- model_do(COR_model, "rft_duration")
            o_set(COR_model, "time", post_rft_time)
            o_set(COR_model, "IRI_resp_times", i = 1:COR_model["IRI_resps"], NaN)
            o_set(COR_model, "IRI_resps", 0)
            o_set(COR_model, "rft_arranged", FALSE)
        }
    }

    trim_event_record(COR_model@event_record)

    # Summary statistics from the event record. Reinforcement times are
    # offset by the food duration to mark reinforcer onset.
    session_rates <- compute.session_rates(
        COR_model@event_record,
        list(rft_time = -COR_model["food_duration"]),
        session_duration = COR_model["session_duration"]
    )
    last_resp_time <- max(COR_model@event_record@events$resp_time)
    first_resp_time <- min(COR_model@event_record@events$resp_time)

    irt <- compute.IxyI(COR_model@event_record, x_event = "resp_time", break_event = "rft_time")
    iri <- compute.IxyI(COR_model@event_record, x_event = "rft_time", x_offset = -COR_model["food_duration"])
    prp <- compute.IxyI(COR_model@event_record, x_event = "rft_time", y_event = "resp_time", x_offset = -COR_model["food_duration"])

    list(session_rates, first_resp_time, last_resp_time, irt, iri, prp)
}

# Run the model and collect the behavioural profile.
COR.do(COR_params, COR_model)

## End(Not run)

Don-Li/CAB documentation built on May 6, 2019, 2:52 p.m.