remeta.plotting

plot_psychometric

Plot psychometric function.

Usage

Psychometric curve for empirical data:

plot_psychometric(stimuli, choices) # Data only

plot_psychometric(stimuli, choices, type1_noise, type1_bias, ...) # Data + model

plot_psychometric(stimuli, choices, params=params) # Data + model

Psychometric curve for a simulation instance (remeta.simulation.Simulation):

plot_psychometric(simulation) # (Simulated) Data

plot_psychometric(simulation, model_prediction=True) # (Simulated) Data + model

Model only:

plot_psychometric(type1_noise=type1_noise, type1_bias=type1_bias, ...) # Model

plot_psychometric(params=params) # Model
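For a concrete starting point, here is a minimal, self-contained sketch with made-up data and purely illustrative parameter values (it assumes `remeta` is installed and that `plot_psychometric` is imported from `remeta.plotting`):

```python
import numpy as np
from remeta.plotting import plot_psychometric

rng = np.random.default_rng(0)

# Made-up data: stimuli normalized to [-1, 1], binary choices (1 = S+)
stimuli = rng.choice([-0.8, -0.4, -0.1, 0.1, 0.4, 0.8], size=500)
choices = (stimuli + rng.normal(0, 0.5, size=500) > 0).astype(int)

plot_psychometric(stimuli, choices)                   # Data only
plot_psychometric(stimuli, choices, type1_noise=0.5)  # Data + model
```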

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `stimuli_or_simulation` | `list[float] \| NDArray[float] \| Simulation \| None` | 1d stimulus array (normalized to [-1; 1]) or [Simulation][remeta.simulation.Simulation] object | `None` |
| `choices` | `list[float] \| NDArray[float] \| None` | 1d choice array | `None` |
| `type1_noise` | `float \| list[float] \| None` | Type 1 noise | `None` |
| `type1_bias` | `float \| None` | Type 1 bias | `None` |
| `type1_thresh` | `float \| list[float] \| None` | Type 1 threshold | `None` |
| `stim_max` | `float \| None` | Maximum stimulus intensity | `None` |
| `params` | `dict \| None` | Instead of passing parameters individually, one can pass a parameter dictionary | `None` |
| `type1_noise_dist` | `str` | Distribution of the type 1 noise (`'normal'` or `'logistic'`) | `'normal'` |
| `cfg` | `Configuration \| None` | If a Configuration instance is passed, checks are performed for `cfg.param_type1_bias.enable` and `cfg.param_type1_thresh.enable` | `None` |
| `model_prediction` | `bool` | Whether to show model-predicted values for comparison | `False` |
| `model_only` | `bool` | Show the model prediction only (auto-set to True if no data are passed) | `False` |
| `highlight_fit` | `bool` | Emphasize the fit over the data | `False` |
| `errorbars` | `bool` | Whether to include error bars for empirical data (SEM) | `True` |
| `path_export` | `str \| None` | If provided, save the figure to this path | `None` |
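The `params` route is equivalent to passing the type 1 parameters individually. A short sketch, reusing `stimuli` and `choices` from the example above (keys follow the parameter names in the table; values are illustrative):

```python
params = dict(type1_noise=0.5, type1_bias=0.1)

plot_psychometric(stimuli, choices, params=params)  # Data + model
plot_psychometric(params=params)                    # Model only
```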
Source code in remeta/plotting.py
def plot_psychometric(
        stimuli_or_simulation: list[float] | np.typing.NDArray[float] | Simulation | None = None,
        choices: list[float] | np.typing.NDArray[float] | None = None,
        type1_noise: float | list[float] | None = None,
        type1_bias: float | None = None,
        type1_thresh: float | list[float] | None = None,
        stim_max: float | None = None,
        params: dict | None = None,
        type1_noise_dist: str = 'normal',
        cfg: Configuration | None = None,
        model_prediction: bool = False,
        model_only: bool = False,
        highlight_fit: bool = False,
        errorbars: bool = True,
        path_export: str | None = None
) -> None:
    """Plot psychometric function.

    Usage:
        **Psychometric curve for empirical data:**

        `plot_psychometric(stimuli, choices)`  # Data only

        `plot_psychometric(stimuli, choices, type1_noise, type1_bias, ...)`  # Data + model

        `plot_psychometric(stimuli, choices, params=params)`  # Data + model

        **Psychometric curve for a simulation instance (`remeta.simulation.Simulation`):**

        `plot_psychometric(simulation)`  # (Simulated) Data

        `plot_psychometric(simulation, model_prediction=True)`  # (Simulated) Data + model

        **Model only:**

        `plot_psychometric(type1_noise=type1_noise, type1_bias=type1_bias, ...)`  # Model

        `plot_psychometric(params=params)`  # Model

    Args:
        stimuli_or_simulation: 1d stimulus array (normalized to [-1; 1]) or [Simulation][remeta.simulation.Simulation] object
        choices: 1d choice array
        type1_noise: Type 1 noise.
        type1_bias: Type 1 bias.
        type1_thresh: Type 1 threshold.
        stim_max: maximum stimulus intensity.
        params: instead of passing parameters individually, one can pass a parameter dictionary
        type1_noise_dist: distribution of the type 1 noise ('normal' or 'logistic')
        cfg: If a [Configuration][remeta.configuration.Configuration] instance is passed, checks are performed for
             `cfg.param_type1_bias.enable` and `cfg.param_type1_thresh.enable`
        model_prediction: whether to show model-predicted values for comparison
        model_only: Show the model prediction only (auto-set to True if no data are passed)
        highlight_fit: Emphasize the fit over the data
        errorbars: Whether to include error bars for empirical data (SEM)
        path_export: if provided, save the figure to this path
    """

    if type1_noise is not None or params is not None:
        model_prediction = True

    if stimuli_or_simulation is not None and choices is None:  # assume a Simulation instance is passed
        cfg = stimuli_or_simulation.cfg
        stimuli = stimuli_or_simulation.stimuli
        choices = stimuli_or_simulation.choices
        if model_prediction:
            params = stimuli_or_simulation.params
            type1_noise = params['type1_noise']
            type1_bias = params['type1_bias'] if cfg.param_type1_bias.enable else None
            type1_thresh = params['type1_thresh'] if cfg.param_type1_thresh.enable else None
    else:
        if choices is None:
            model_only = True
        stimuli = None if model_only else stimuli_or_simulation
        if model_prediction:
            if params is None:
                if type1_noise is None:
                    raise ValueError('Model predictions requested, but type 1 noise is unspecified.')
                params = dict(type1_noise=type1_noise)
            else:
                type1_noise = params['type1_noise']
                if 'type1_bias' in params and not (cfg is not None and not cfg.param_type1_bias.enable):
                    type1_bias = params['type1_bias']
                if 'type1_thresh' in params and not (cfg is not None and not cfg.param_type1_thresh.enable):
                    type1_thresh = params['type1_thresh']

    if stim_max is None:
        stim_max = 1 if stimuli is None else np.max(np.abs(stimuli))

    xrange_neg = np.linspace(-stim_max, 0, 1000)
    xrange_pos = np.linspace(stim_max / 1000, stim_max, 1000)

    if model_prediction:
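        # Resolve the type 1 parameters (honoring cfg overrides), then evaluate the model's
        # predicted probability of choosing S+ on the dense grids above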
        type1_noise = _check_param(type1_noise)
        if (cfg is None and type1_thresh is not None) or (cfg is not None and cfg.param_type1_thresh.enable):
            params['type1_thresh'] = type1_thresh
            type1_thresh = _check_param(type1_thresh)
        else:
            type1_thresh = [0, 0]

        if (cfg is None and type1_bias is not None) or (cfg is not None and cfg.param_type1_bias.enable):
            params['type1_bias'] = type1_bias
            type1_bias = _check_param(type1_bias)
        else:
            type1_bias = [0, 0]

        if cfg is not None:
            type1_noise_dist = cfg.param_type1_noise.model
        cdf = _logistic if type1_noise_dist == 'logistic' else _normal
        posterior_neg = cdf(xrange_neg, type1_noise[0], type1_thresh[0], type1_bias[0])
        posterior_pos = cdf(xrange_pos, type1_noise[1], type1_thresh[1], type1_bias[1])


    fig = plt.figure(figsize=(6, 3.5))
    fig.subplots_adjust(bottom=0.2)
    gs = fig.add_gridspec(1, 2, width_ratios=[1, 0.5], wspace=0)
    ax = fig.add_subplot(gs[0, 0])
    ax_leg = fig.add_subplot(gs[0, 1])
    ax_leg.axis("off")
    plt.sca(ax)


    if not model_only:
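        # Empirical choice probabilities: per stimulus level, the proportion of choices
        # matching the stimulus category (plotted as the probability of choosing S+)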
        stimulus_ids = (stimuli > 0).astype(int)
        levels = np.unique(stimuli)
        choiceprob_neg = np.array([np.mean(choices[(stimuli == v) & (stimulus_ids == 0)] ==
                                           stimulus_ids[(stimuli == v) & (stimulus_ids == 0)])
                                   for v in levels[levels < 0]])
        choiceprob_pos = np.array([np.mean(choices[(stimuli == v) & (stimulus_ids == 1)] ==
                                           stimulus_ids[(stimuli == v) & (stimulus_ids == 1)])
                                   for v in levels[levels > 0]])
        if errorbars:
            choiceprob_neg_se = np.array([sem(choices[(stimuli == v) & (stimulus_ids == 0)] ==
                                               stimulus_ids[(stimuli == v) & (stimulus_ids == 0)])
                                       for v in levels[levels < 0]])
            choiceprob_pos_se = np.array([sem(choices[(stimuli == v) & (stimulus_ids == 1)] ==
                                               stimulus_ids[(stimuli == v) & (stimulus_ids == 1)])
                                       for v in levels[levels > 0]])
        else:
            choiceprob_neg_se, choiceprob_pos_se = np.nan, np.nan
        plt.errorbar(levels[levels < 0], 1-choiceprob_neg, yerr=choiceprob_neg_se, fmt='o', color=color_data, mec='k', ls='' if model_prediction else '-',
                     label=f'Data $S^-$ (mean{["", "±SE"][int(errorbars)]})', clip_on=False, zorder=11, alpha=(1, 0.2)[highlight_fit])
        if not model_prediction:
            plt.plot([levels[levels < 0][-1], levels[levels > 0][0]], [1-choiceprob_neg[-1], choiceprob_pos[0]], color=color_data)
        plt.errorbar(levels[levels > 0], choiceprob_pos, yerr=choiceprob_pos_se, fmt='s', color=color_data, mec='k', ls='' if model_prediction else '-',
                     label=f'Data $S^+$ (mean{["", "±SE"][int(errorbars)]})', clip_on=False, zorder=11, alpha=(1, 0.2)[highlight_fit])

    if model_prediction:
        plt.plot(xrange_neg, posterior_neg, '-', lw=(2, 5)[highlight_fit], color=color_model, clip_on=False,
                 zorder=(10, 12)[highlight_fit], label='Model')
        plt.plot(xrange_pos, posterior_pos, '-', lw=(2, 5)[highlight_fit], color=color_model, clip_on=False,
                 zorder=(10, 12)[highlight_fit])

    plt.plot([-stim_max, stim_max], [0.5, 0.5], 'k-', lw=0.5)
    plt.plot([0, 0], [-0.02, 1.02], 'k-', lw=0.5)

    ax.yaxis.grid('on', color=[0.9, 0.9, 0.9])
    plt.xlim((-stim_max, stim_max))
    plt.ylim((0, 1))
    plt.xlabel('Stimulus ($x$)')
    plt.ylabel('Choice probability $S^+$')
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.2g'))

    if model_prediction:
        anot_type1 = []
        for i, (k, v) in enumerate(params.items()):
            # if (cfg is None and k in params) or (cfg is not None and getattr(cfg, f"enable_param_{k}")) and k.startswith('type1_'):
            if k.startswith('type1') and (cfg is None or (cfg is not None and getattr(cfg, f"param_{k}").enable)):
                if hasattr(v, '__len__'):
                    val = ', '.join([fmp(p) for p in v])
                    anot_type1 += [f"${symbols[k][1:-1]}=" + f"[{val}]$"]
                else:
                    anot_type1 += [f"${symbols[k][1:-1]}={fmp(v)}$"]
        plt.text(1.045, 0.1, r'Parameters:' + '\n' + '\n'.join(anot_type1), transform=ax.transAxes,
                 bbox=dict(fc=[1, 1, 1], ec=[0.5, 0.5, 0.5], lw=1, pad=5), fontsize=11)
    set_fontsize(label='default', tick='default')

    # ax_leg.legend(bbox_to_anchor=(1.01, 1), loc="upper left", fontsize=11, handlelength=1)
    ax_leg.legend(*ax.get_legend_handles_labels(), loc="upper left", fontsize=11, handlelength=1)

    if path_export is not None:
        plt.savefig(path_export, bbox_inches='tight', pad_inches=0.02)

plot_stimulus_versus_confidence

Plot the relationship between stimulus levels and confidence.

Usage

Plot for empirical data:

plot_stimulus_versus_confidence(stimuli, confidence) # Data

plot_stimulus_versus_confidence(stimuli, confidence, type1_noise=type1_noise, type2_noise=type2_noise, ...) # Data + Model

plot_stimulus_versus_confidence(stimuli, confidence, params=params, ...) # Data + Model

Plot for a simulation instance (remeta.simulation.Simulation):

plot_stimulus_versus_confidence(simulation) # (Simulated) Data

plot_stimulus_versus_confidence(simulation, model_prediction=True) # (Simulated) Data + model

Model only:

plot_stimulus_versus_confidence(type1_noise=type1_noise, type2_noise=type2_noise, ...) # Model

plot_stimulus_versus_confidence(params=params) # Model
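As a quick illustration, a minimal sketch with made-up data and purely illustrative parameter values (it assumes `remeta` is installed and the function is imported from `remeta.plotting`):

```python
import numpy as np
from remeta.plotting import plot_stimulus_versus_confidence

rng = np.random.default_rng(1)

# Made-up data: confidence loosely tracks stimulus intensity
stimuli = rng.choice([-0.8, -0.4, -0.1, 0.1, 0.4, 0.8], size=500)
choices = (stimuli + rng.normal(0, 0.5, size=500) > 0).astype(int)
confidence = np.clip(np.abs(stimuli) + rng.normal(0, 0.2, size=500), 0, 1)

plot_stimulus_versus_confidence(stimuli, confidence, choices)  # Data only
plot_stimulus_versus_confidence(stimuli, confidence, choices,
                                type1_noise=0.5, type2_noise=0.3)  # Data + model
```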

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `stimuli_or_simulation` | `list[float] \| NDArray[float] \| Simulation \| None` | 1d stimulus array (normalized to [-1; 1]) or [Simulation][remeta.simulation.Simulation] object | `None` |
| `confidence` | `list[float] \| NDArray[float] \| None` | 1d confidence array | `None` |
| `choices` | `list[float] \| NDArray[float] \| None` | 1d choice array | `None` |
| `params` | `dict \| None` | Pass parameters as a dictionary instead of individually | `None` |
| `type1_noise` | `float \| list[float] \| None` | Type 1 noise | `None` |
| `type1_bias` | `float \| list[float] \| None` | Type 1 bias | `None` |
| `type1_thresh` | `float \| list[float] \| None` | Type 1 threshold | `None` |
| `type2_noise` | `float \| None` | Metacognitive noise. Required for model predictions | `None` |
| `type2_evidence_bias_mult` | `float \| None` | Multiplicative metacognitive bias | `None` |
| `type2_criteria` | `float \| list[float] \| None` | Confidence criteria | `None` |
| `cfg` | `Configuration \| None` | Placeholder - checking the configuration object is not yet implemented | `None` |
| `stim_max` | `float \| None` | Maximum stimulus intensity | `None` |
| `errorbar_type` | `str \| None` | Either None (disable), 'SD' (standard deviation) or 'SEM' (standard error of the mean) | `'SEM'` |
| `probability_correct` | `bool` | If True, convert confidence (range 0-1) to subjective probability correct (range 0.5-1) | `False` |
| `separate_by_accuracy` | `bool` | Separate plots for correct and incorrect responses | `False` |
| `model_prediction` | `bool` | Whether to show model-predicted values for comparison | `False` |
| `model_prediction_nsamples` | `int` | Number of samples used to generate model predictions | `10000` |
| `model_only` | `bool` | Show the model prediction only (auto-set to True if no data are passed) | `False` |
| `model_prediction_disable_type2_noise` | `bool` | Plot the model prediction with ~0 metacognitive noise | `False` |
| `path_export` | `str \| None` | If provided, save the figure to this path | `None` |
Source code in remeta/plotting.py
def plot_stimulus_versus_confidence(
        stimuli_or_simulation: list[float] | np.typing.NDArray[float] | Simulation | None = None,
        confidence: list[float] | np.typing.NDArray[float] | None = None,
        choices: list[float] | np.typing.NDArray[float] | None = None,
        params: dict | None = None,
        type1_noise: float | list[float] | None = None,
        type1_bias: float | list[float] | None = None,
        type1_thresh: float | list[float] | None = None,
        type2_noise: float | None = None,
        type2_evidence_bias_mult: float | None = None,
        type2_criteria: float | list[float] | None = None,
        cfg: Configuration | None = None,
        stim_max: float | None = None,
        errorbar_type: str | None = 'SEM',
        probability_correct: bool = False,
        separate_by_accuracy: bool = False,
        model_prediction: bool = False,
        model_prediction_nsamples: int = 10000,
        model_only: bool = False,
        model_prediction_disable_type2_noise: bool = False,
        path_export: str | None = None
) -> None:
    """ Plot the relationship between stimulus levels and confidence.

    Usage:
        **Plot for empirical data:**

        `plot_stimulus_versus_confidence(stimuli, confidence)`  # Data

        `plot_stimulus_versus_confidence(stimuli, confidence, type1_noise=type1_noise, type2_noise=type2_noise, ...)`  # Data + Model

        `plot_stimulus_versus_confidence(stimuli, confidence, params=params, ...)`  # Data + Model

        **Plot for a simulation instance (`remeta.simulation.Simulation`):**

        `plot_stimulus_versus_confidence(simulation)`  # (Simulated) Data

        `plot_stimulus_versus_confidence(simulation, model_prediction=True)`  # (Simulated) Data + model

        **Model only:**

        `plot_stimulus_versus_confidence(type1_noise=type1_noise, type2_noise=type2_noise, ...)`  # Model

        `plot_stimulus_versus_confidence(params=params)`  # Model

    Args:
        stimuli_or_simulation: 1d stimulus array (normalized to [-1; 1]) or [Simulation][remeta.simulation.Simulation] object
        confidence: 1d confidence array
        choices: 1d choice array
        params: pass parameters as a dictionary instead of individually
        type1_noise: Type 1 noise.
        type1_bias: Type 1 bias.
        type1_thresh: Type 1 threshold.
        type2_noise: Metacognitive noise. Required for model predictions.
        type2_evidence_bias_mult: Multiplicative metacognitive bias
        type2_criteria: Confidence criteria
        cfg: Placeholder - checking the configuration object is not yet implemented
        stim_max: maximum stimulus intensity
        errorbar_type: either None (disable), 'SD' (standard deviation) or 'SEM' (standard error)
        probability_correct: if True, convert confidence (range 0-1) to subjective probability correct (range 0.5-1)
        separate_by_accuracy: separate plots for correct and incorrect responses
        model_prediction: whether to show model-predicted values for comparison
        model_prediction_nsamples: number of samples used to generate model predictions
        model_only: Show the model prediction only (auto-set to True if no data are passed)
        model_prediction_disable_type2_noise: plot the model prediction with ~0 metacognitive noise
        path_export: if provided, save the figure to this path
    """

    if stimuli_or_simulation is not None and confidence is None:  # assume first parameter is a simulated dataset
        from copy import deepcopy
        cfg = deepcopy(stimuli_or_simulation.cfg)
        stimuli, confidence, choices = stimuli_or_simulation.stimuli, stimuli_or_simulation.confidence, stimuli_or_simulation.choices
        params = stimuli_or_simulation.params.copy()
    else:

        if separate_by_accuracy and choices is None and not model_only:
            raise ValueError('If separate_by_accuracy is True, choices must be passed.')

        if confidence is None or params is not None or (type1_noise is not None and type2_noise is not None):
            model_prediction = True
        stimuli = None if model_only else stimuli_or_simulation
        choices = None if model_only else choices
        if stimuli is None:
            model_only = True
        if model_prediction:
            if params is None:
                if type1_noise is None:
                    raise ValueError('Type 1 noise is unspecified.')
                if type2_noise is None:
                    raise ValueError('Type 2 noise is unspecified.')
                params = dict(type1_noise=type1_noise, type2_noise=type2_noise)
                for param in ('type1_bias', 'type1_thresh', 'type2_evidence_bias_mult', 'type2_criteria'):
                    if (value := eval(param)) is not None and not (cfg is not None and not getattr(cfg, f'param_{param}').enable):
                        params[param] = value
            else:
                params = params.copy()
                if 'type1_noise' not in params:
                    raise ValueError('Type 1 noise is unspecified.')
                if 'type2_noise' not in params:
                    raise ValueError('Type 2 noise is unspecified.')
                for param in ('type1_bias', 'type1_thresh', 'type2_evidence_bias_mult', 'type2_criteria'):
                    if param in params and (cfg is not None and not getattr(cfg, f'param_{param}').enable):
                        params.pop(param)

    if model_prediction and model_prediction_disable_type2_noise:
        params['type2_noise'] = 1e-5


    if probability_correct:
        confidence = (confidence + 1) / 2

    fig = plt.figure(figsize=(6, 3.5))
    fig.subplots_adjust(bottom=0.2)
    gs = fig.add_gridspec(1, 2, width_ratios=[1, 0.5], wspace=0)
    ax = fig.add_subplot(gs[0, 0])
    ax_leg = fig.add_subplot(gs[0, 1])
    ax_leg.axis("off")
    plt.sca(ax)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.2g'))

    if stim_max is None:
        stim_max = 1 if stimuli is None else np.max(np.abs(stimuli))


    if errorbar_type is not None:
        if errorbar_type == 'SD':
            errorfun = np.std
        elif errorbar_type == 'SEM':
            errorfun = sem
        else:
            raise ValueError(f"Unknown errorbar type '{errorbar_type}' (valid: 'SD', 'SEM')")

    chance_level_ref = 0.75 if probability_correct else 0.5
    if not separate_by_accuracy:
        plt.plot([-1.05*stim_max, 1.05*stim_max], [chance_level_ref, chance_level_ref], lw=1, color=[0.2, 0.2, 0.2],
                 ls=':', label='Theoretical value\nat chance level')
    plt.plot([0, 0], [0, 1], 'k-', lw=0.5)

    if not model_only:
        stim_levels = sorted(np.unique(stimuli))
        if separate_by_accuracy:
            accuracy = np.sign(stimuli) == np.sign(choices - 0.5)
            conf_av_inc = np.array([np.nan if (cnd := (~accuracy & (stimuli == stim_level))).sum() == 0 else np.mean(confidence[cnd]) for stim_level in stim_levels])
            conf_av_cor = np.array([np.nan if (cnd := (accuracy & (stimuli == stim_level))).sum() == 0 else np.mean(confidence[cnd]) for stim_level in stim_levels])
            if errorbar_type is not None:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=RuntimeWarning)
                    conf_err_inc = np.array([np.nan if (cnd := (~accuracy & (stimuli == stim_level))).sum() == 0 else errorfun(confidence[cnd]) for stim_level in stim_levels])
                    conf_err_cor = np.array([np.nan if (cnd := (accuracy & (stimuli == stim_level))).sum() == 0 else errorfun(confidence[cnd]) for stim_level in stim_levels])
            else:
                conf_err_inc = np.zeros(len(stim_levels))
                conf_err_cor = np.zeros(len(stim_levels))
            plt.errorbar(
                stim_levels,
                conf_av_cor,
                yerr=None if errorbar_type is None else conf_err_cor,
                marker='o', markersize=5, mew=1, mec='k', color='None', ecolor=color_cor, mfc=color_cor, clip_on=False,
                elinewidth=1.5, capsize=5, label=f'Data (correct,\nmean{"" if errorbar_type is None else f"±{errorbar_type}"})'
            )
            plt.errorbar(
                stim_levels,
                conf_av_inc,
                yerr=None if errorbar_type is None else conf_err_inc,
                marker='o', markersize=5, mew=1, mec='k', color='None', ecolor=color_inc, mfc=color_inc, clip_on=False,
                elinewidth=1.5, capsize=5, label=f'Data (incorrect,\nmean{"" if errorbar_type is None else f"±{errorbar_type}"})'
            )
            conf_min_data = min(np.nanmin(conf_av_inc - conf_err_inc), np.nanmin(conf_av_cor - conf_err_cor))
        else:
            conf_av = np.array([np.mean(confidence[stimuli == stim_level]) for stim_level in stim_levels])
            if errorbar_type is not None:
                conf_err = np.array([errorfun(confidence[stimuli == stim_level]) for stim_level in stim_levels])
            else:
                conf_err = np.zeros(len(stim_levels))

            plt.errorbar(
                stim_levels,
                conf_av,
                ls='' if model_prediction else '-',
                yerr=None if errorbar_type is None else conf_err,
                marker='o', markersize=5, mew=1, mec='k', color=color_data, ecolor='k', mfc=color_data, clip_on=False,
                elinewidth=1.5, capsize=5, label=f'Data (mean{"" if errorbar_type is None else f"±{errorbar_type}"})'
            )
            conf_min_data = np.min(conf_av - conf_err)

    if model_prediction:

        # nlevels = 100
        # nrows_per_level = int(np.ceil(model_prediction_nsamples / nlevels / 2))
        # stim_min = stim_max / nlevels
        # levels = np.hstack((np.linspace(-stim_max, -stim_min, nlevels), np.linspace(stim_min, stim_max, nlevels)))
        # x_stim = np.tile(levels, (nrows_per_level, 1))
        #
        # y_decval = stimulus_to_decision_value(x_stim, params, return_only_decval=True)
        # c_conf = type1_evidence_to_confidence(
        #     z1_type1_evidence=np.abs(y_decval), y_decval=y_decval,
        #     **params
        # ).mean(axis=0)

        _nsamples = int(np.ceil(model_prediction_nsamples / 2))
        stim_min = stim_max / _nsamples
        levels = np.hstack((np.linspace(-stim_max, -stim_min, _nsamples), np.linspace(stim_min, stim_max, _nsamples)))
        # y_decval = stimulus_to_decision_value(levels, params, return_only_decval=True)
        # c_conf = type1_evidence_to_confidence(
        #     z1_type1_evidence=np.abs(y_decval), x_stim=levels,
        #     **params
        # )
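        # Simulate many synthetic observers at densely spaced stimulus levels to obtain
        # the model-predicted confidence for each level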
        ds = simulate(
            nsubjects=100,
            params=params, cfg=cfg, custom_stimuli=levels, verbosity=False,
            stim_max=stim_max, squeeze=True, compute_stats=False,
            silence_warnings=True
        )
        c_conf = (ds.confidence + 1) / 2 if probability_correct else ds.confidence

        # from scipy.interpolate import UnivariateSpline

        if 'type1_bias' in params:
            if listlike(params['type1_bias']):
                warnings.warn('Stimulus-dependent bias is currently not supported for this plot.')
                bias = params['type1_bias'][0]
            else:
                bias = params['type1_bias']
        else:
            bias = 0

        # Constrain the spline in a way that it is exactly chance_level_ref for x_stim = -bias
        # We do this by weighting an additional datapoint at x_stim = -bias very highly
        # ind_levels_min = np.argmin(np.abs(levels + bias))
        # # insert data point at the position that is closest to already present values, while not
        # # preserving ordering
        # insert_idx = ind_levels_min + 1 if (levels[ind_levels_min] + bias < 0) else ind_levels_min
        # weights = np.insert(np.ones_like(c_conf), insert_idx, 1e8)
        # c_conf_aug = np.insert(c_conf, insert_idx, chance_level_ref)
        # levels_aug = np.insert(levels, insert_idx, -bias)
        # c_conf_final = UnivariateSpline(levels_aug, c_conf_aug, k=3, w=weights)(levels)

        from scipy.ndimage import gaussian_filter1d
        if separate_by_accuracy:
            accuracy = np.sign(ds.choices - 0.5) == np.sign(ds.stimuli)
            c_conf_inc_, c_conf_cor_ = c_conf.copy(), c_conf.copy()
            c_conf_inc_[accuracy] = np.nan
            c_conf_cor_[~accuracy] = np.nan
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=RuntimeWarning)
                c_conf_inc_mean, c_conf_cor_mean = np.nanmean(c_conf_inc_, axis=0), np.nanmean(c_conf_cor_, axis=0)
            # Interpolate nans
            isnan_inc, isnan_cor = np.isnan(c_conf_inc_mean), np.isnan(c_conf_cor_mean)
            c_conf_inc_mean[isnan_inc] = np.interp(np.arange(2*_nsamples)[isnan_inc], np.arange(2*_nsamples)[~isnan_inc], c_conf_inc_mean[~isnan_inc])
            c_conf_cor_mean[isnan_cor] = np.interp(np.arange(2*_nsamples)[isnan_cor], np.arange(2*_nsamples)[~isnan_cor], c_conf_cor_mean[~isnan_cor])
            c_conf_inc = gaussian_filter1d(c_conf_inc_mean, sigma=model_prediction_nsamples/20)
            c_conf_cor = gaussian_filter1d(c_conf_cor_mean, sigma=model_prediction_nsamples/20)
            plt.plot(levels, c_conf_cor, '-', lw=2, color=color_cor, clip_on=False,
                     label='Model prediction\n(correct)', alpha=0.5)
                     # label='Model prediction' + (r"($\sigma_2=0$)" if model_prediction_disable_type2_noise else "")+'\n(correct)', alpha=0.5)
            plt.plot(levels, c_conf_inc, '-', lw=2, color=color_inc, clip_on=False,
                     label='Model prediction\n(incorrect)', alpha=0.5)
                     # label='Model prediction' + (r"($\sigma_2=0$)" if model_prediction_disable_type2_noise else "")+'\n(incorrect)', alpha=0.5)
            conf_min_model = min(min(c_conf_inc), min(c_conf_cor))
        else:
            c_conf_final = gaussian_filter1d(c_conf.mean(axis=0), sigma=model_prediction_nsamples/20)
            plt.plot(levels, c_conf_final, '-', lw=2, color=color_model, clip_on=False,
                     label='Model prediction')
                     # label=f'Model prediction' + (r"($\sigma_2=0$)" if model_prediction_disable_type2_noise else ""))
            conf_min_model = min(c_conf_final)

    plt.xlim(-1.05*stim_max, 1.05*stim_max)
    conf_min = conf_min_model if model_only else (min(conf_min_data, conf_min_model) if model_prediction else conf_min_data)
    step = 0.05 if probability_correct else 0.1
    ymin = (np.floor(conf_min / step) * step) if conf_min < chance_level_ref else chance_level_ref - step / 2
    plt.ylim(ymin, 1)
    plt.xlabel('Stimulus ($x$)')
    plt.ylabel('Subjective prob. correct' if probability_correct else 'Confidence ($C$)')

    if model_prediction:
        anot_type2 = []
        for i, (k, v) in enumerate(params.items()):
            if k.startswith('type2_'):
                if listlike(v):
                    val = ', '.join([fmp(p) for p in v])
                    anot_type2 += [f"${symbols[k][1:-1]}=" + f"[{val}]$"]
                else:
                    anot_type2 += [f"${symbols[k][1:-1]}={fmp(v)}$"]
        plt.text(1.045, 0.1-0.2*separate_by_accuracy, r'Type 2 parameters:' + '\n' + '\n'.join(anot_type2), transform=ax.transAxes,
                 bbox=dict(fc=[1, 1, 1], ec=[0.5, 0.5, 0.5], lw=1, pad=5), fontsize=11)

    # plt.legend(bbox_to_anchor=(1.01, 1), loc="upper left", fontsize=11, handlelength=1)
    ax_leg.legend(*ax.get_legend_handles_labels(), loc="upper left", fontsize=11, handlelength=1)

    set_fontsize(label='default', tick='default')
    if path_export is not None:
        plt.savefig(path_export, bbox_inches='tight', pad_inches=0.02)

plot_confidence_histogram

Plot a histogram of confidence ratings.

Usage

Plot for empirical data:

plot_confidence_histogram(confidence) # Data

plot_confidence_histogram(confidence, stimuli, choices, separate_by_accuracy=True) # Data

plot_confidence_histogram(confidence, type1_noise=type1_noise, type2_noise=type2_noise, ...) # Data + Model

plot_confidence_histogram(confidence, params=params, ...) # Data + Model

Plot for a simulation instance (remeta.simulation.Simulation):

plot_confidence_histogram(simulation) # (Simulated) Data

plot_confidence_histogram(simulation, model_prediction=True) # (Simulated) Data + model

Model only:

plot_confidence_histogram(type1_noise=type1_noise, type2_noise=type2_noise, ...) # Model

plot_confidence_histogram(params=params) # Model
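A minimal sketch with made-up data (discrete confidence ratings, plus stimuli and choices for the accuracy split; it assumes `remeta` is installed and the function is imported from `remeta.plotting`):

```python
import numpy as np
from remeta.plotting import plot_confidence_histogram

rng = np.random.default_rng(2)

# Made-up data: binary choices and confidence ratings on a 5-point scale
stimuli = rng.choice([-0.8, -0.4, -0.1, 0.1, 0.4, 0.8], size=500)
choices = (stimuli + rng.normal(0, 0.5, size=500) > 0).astype(int)
confidence = rng.choice([0.0, 0.25, 0.5, 0.75, 1.0], size=500)

plot_confidence_histogram(confidence)  # Data only
plot_confidence_histogram(confidence, stimuli, choices,
                          separate_by_accuracy=True)  # Split by accuracy
```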

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `confidence_or_simulation` | `list[float] \| NDArray[float] \| Simulation \| None` | 1d confidence array or [Simulation][remeta.simulation.Simulation] object | `None` |
| `stimuli` | `list[float] \| NDArray[float] \| None` | 1d stimulus array | `None` |
| `choices` | `list[float] \| NDArray[float] \| None` | 1d choice array | `None` |
| `params` | `dict[str, float] \| None` | Pass parameters as a dictionary instead of individually | `None` |
| `type1_noise` | `float \| list[float] \| None` | Type 1 noise | `None` |
| `type1_bias` | `float \| list[float] \| None` | Type 1 bias | `None` |
| `type1_thresh` | `float \| list[float] \| None` | Type 1 threshold | `None` |
| `type2_noise` | `float \| None` | Metacognitive noise. Required for model predictions | `None` |
| `type2_evidence_bias_mult` | `float \| None` | Multiplicative metacognitive bias | `None` |
| `type2_criteria` | `list[float] \| None` | Confidence criteria | `None` |
| `cfg` | `Configuration \| None` | Placeholder - checking the configuration object is not yet implemented | `None` |
| `stim_max` | `float \| None` | Maximum stimulus intensity | `None` |
| `probability_correct` | `bool` | If True, convert confidence (range 0-1) to subjective probability correct (range 0.5-1) | `False` |
| `model_prediction` | `bool` | Whether to show model-predicted values for comparison | `False` |
| `model_only` | `bool` | Show the model prediction only (auto-set to True if no data are passed) | `False` |
| `separate_by_category` | `bool` | Separate histograms for the two stimulus categories | `False` |
| `separate_by_accuracy` | `bool` | Separate histograms for correct and incorrect responses | `False` |
| `model_prediction_nsamples` | `int` | Number of samples used to generate model predictions | `10000` |
| `path_export` | `str \| None` | If provided, save the figure to this path | `None` |
Source code in remeta/plotting.py
def plot_confidence_histogram(
        confidence_or_simulation: list[float] | np.typing.NDArray[float] | Simulation | None = None,
        stimuli: list[float] | np.typing.NDArray[float] | None = None,
        choices: list[float] | np.typing.NDArray[float] | None = None,
        params: dict[str, float] | None = None,
        type1_noise: float | list[float] | None = None,
        type1_bias: float | list[float] | None = None,
        type1_thresh: float | list[float] | None = None,
        type2_noise: float | None = None,
        type2_evidence_bias_mult: float | None = None,
        type2_criteria: list[float] | None = None,
        cfg: Configuration | None = None,
        stim_max: float | None = None,
        probability_correct: bool = False,
        model_prediction: bool = False,
        model_only: bool = False,
        separate_by_category: bool = False,
        separate_by_accuracy: bool = False,
        model_prediction_nsamples: int = 10000,
        path_export: str | None = None
) -> None:
    """ Plot the relationship between stimulus levels and confidence.

    Usage:
        **Plot for empirical data:**

        `plot_confidence_histogram(confidence)`  # Data

        `plot_confidence_histogram(confidence, stimuli, choices, separate_by_accuracy=True)`  # Data

        `plot_confidence_histogram(confidence, type1_noise=type1_noise, type2_noise=type2_noise, ...)`  # Data + Model

        `plot_confidence_histogram(confidence, params=params, ...)`  # Data + Model

        **Plot for a simulation instance (`remeta.simulation.Simulation`):**

        `plot_confidence_histogram(simulation)`  # (Simulated) Data

        `plot_confidence_histogram(simulation, model_prediction=True)`  # (Simulated) Data + model

        **Model only:**

        `plot_confidence_histogram(type1_noise=type1_noise, type2_noise=type2_noise, ...)`  # Model

        `plot_confidence_histogram(params=params)`  # Model

    Args:
        confidence_or_simulation: 1d confidence array or [Simulation][remeta.simulation.Simulation] object
        stimuli: 1d stimulus array
        choices: 1d choice array
        params: pass parameters as a dictionary instead of individually
        type1_noise: Type 1 noise.
        type1_bias: Type 1 bias.
        type1_thresh: Type 1 threshold.
        type2_noise: Metacognitive noise. Required for model predictions.
        type2_evidence_bias_mult: Multiplicative metacognitive bias
        type2_criteria: Confidence criteria
        cfg: Placeholder - checking the configuration object is not yet implemented
        stim_max: maximum stimulus intensity
        probability_correct: if True, convert confidence (range 0-1) to subjective probability correct (range 0.5-1)
        model_prediction: whether to show model-predicted values for comparison
        model_only: Show the model prediction only (auto-set to True if no data are passed)
        separate_by_category: separate histograms for the two stimulus categories
        separate_by_accuracy: separate histograms for correct and incorrect responses
        model_prediction_nsamples: number of samples used to generate model predictions
        path_export: if provided, save the figure to this path
    """

    if separate_by_accuracy:
        separate_by_category = False

    if model_only:
        model_prediction = True

    if isinstance(confidence_or_simulation, Simulation):
        from copy import deepcopy
        cfg = deepcopy(confidence_or_simulation.cfg)
        stimuli, confidence, choices = confidence_or_simulation.stimuli, confidence_or_simulation.confidence, confidence_or_simulation.choices
        params = confidence_or_simulation.params.copy()
    else:

        if params is not None or (type1_noise is not None and type2_noise is not None):
            model_prediction = True
        confidence = None if model_only else confidence_or_simulation
        stimuli = None if model_only else stimuli
        choices = None if model_only else choices
        if confidence is None:
            model_only = True

        if separate_by_accuracy and choices is None and not model_only:
            raise ValueError('If separate_by_accuracy is True, choices must be passed.')

        if model_prediction:
            if params is None:
                if type1_noise is None:
                    raise ValueError('Type 1 noise is unspecified.')
                if type2_noise is None:
                    raise ValueError('Type 2 noise is unspecified.')
                params = dict(type1_noise=type1_noise, type2_noise=type2_noise)
                for param in ('type1_bias', 'type1_thresh', 'type2_evidence_bias_mult', 'type2_criteria'):
                    if (value := eval(param)) is not None and not (cfg is not None and not getattr(cfg, f'param_{param}').enable):
                        params[param] = value
            else:
                params = params.copy()
                if 'type1_noise' not in params:
                    raise ValueError('Type 1 noise is unspecified.')
                if 'type2_noise' not in params:
                    raise ValueError('Type 2 noise is unspecified.')
                for param in ('type1_bias', 'type1_thresh', 'type2_evidence_bias_mult', 'type2_criteria'):
                    if param in params and (cfg is not None and not getattr(cfg, f'param_{param}').enable):
                        params.pop(param)


    if probability_correct:
        confidence = (confidence + 1) / 2

    if stim_max is None:
        stim_max = 1 if stimuli is None else np.max(np.abs(stimuli))

    fig = plt.figure(figsize=(6, 3.5))
    fig.subplots_adjust(bottom=0.2)
    gs = fig.add_gridspec(1, 2, width_ratios=[1, 0.5], wspace=0)
    ax = fig.add_subplot(gs[0, 0])
    ax_leg = fig.add_subplot(gs[0, 1])
    ax_leg.axis("off")
    plt.sca(ax)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.3g'))

    if not model_only:
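        # Derive histogram bins from the data: uniform bins for quasi-continuous ratings,
        # otherwise bin edges placed midway between the discrete confidence levels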
        conf_levels = np.sort(np.unique(confidence))
        n_conf_levels = len(conf_levels)
        bins = np.linspace(0, 1, n_conf_levels+1) if n_conf_levels >= 8 else np.hstack((0, conf_levels[1:] - np.diff(conf_levels) / 2, 1))

        if separate_by_category:
            counts0 = np.histogram(confidence[np.sign(stimuli) == -1], bins=bins)[0]
            counts1 = np.histogram(confidence[np.sign(stimuli) == 1], bins=bins)[0]
            centers = 0.5 * (bins[:-1] + bins[1:])
            widths  = bins[1:] - bins[:-1]
            for i, (x, w, c0, c1) in enumerate(zip(centers, widths, counts0, counts1)):
                if c0 < c1:
                    plt.bar(x, c1, width=w, color=(0.8, 0.8, 0.8), ec='grey', align='center', zorder=1, label='Data ($S^+$)' if i == 0 else None)
                    plt.bar(x, c0, width=w, color=(0.4, 0.4, 0.4), ec='grey', align='center', zorder=2, label='Data ($S^-$)' if i == 0 else None)
                else:
                    plt.bar(x, c0, width=w, color=(0.4, 0.4, 0.4), ec='grey', align='center', zorder=1, label='Data ($S^-$)' if i == 0 else None)
                    plt.bar(x, c1, width=w, color=(0.8, 0.8, 0.8), ec='grey', align='center', zorder=2, label='Data ($S^+$)' if i == 0 else None)
        elif separate_by_accuracy:
            accuracy = np.sign(stimuli) == np.sign(choices - 0.5)
            counts_inc = np.histogram(confidence[~accuracy], bins=bins)[0]
            counts_cor = np.histogram(confidence[accuracy], bins=bins)[0]
            centers = 0.5 * (bins[:-1] + bins[1:])
            widths  = bins[1:] - bins[:-1]
            for i, (x, w, c0, c1) in enumerate(zip(centers, widths, counts_inc, counts_cor)):
                if c0 < c1:
                    plt.bar(x, c1, width=w, color=color_cor, ec='grey', align='center', zorder=1, label='Data (correct)' if i == 0 else None)
                    plt.bar(x, c0, width=w, color=color_inc, ec='grey', align='center', zorder=2, label='Data (incorrect)' if i == 0 else None)
                else:
                    plt.bar(x, c0, width=w, color=color_inc, ec='grey', align='center', zorder=1, label='Data (incorrect)' if i == 0 else None)
                    plt.bar(x, c1, width=w, color=color_cor, ec='grey', align='center', zorder=2, label='Data (correct)' if i == 0 else None)
        else:
            plt.hist(confidence, bins=bins, color=(0.8, 0.8, 0.8), edgecolor='grey', clip_on=False, label='Data')

    if model_prediction:
        if model_only:
            _nsamples = int(np.ceil(model_prediction_nsamples / 2))
            bins = np.linspace(0, 1, 5)
        else:
            _nsamples = len(confidence)
        stim_min = stim_max / _nsamples
        levels = np.hstack((np.linspace(-stim_max, -stim_min, _nsamples), np.linspace(stim_min, stim_max, _nsamples)))
        # y_decval = stimulus_to_decision_value(levels, params, return_only_decval=True)
        # c_conf = type1_evidence_to_confidence(
        #     z1_type1_evidence=np.abs(y_decval), x_stim=levels,
        #     **params
        # )
        nsubjects = 100 if model_only else 500
        ds = simulate(
            nsubjects=nsubjects,
            params=params, cfg=cfg, custom_stimuli=levels, verbosity=False,
            stim_max=stim_max, squeeze=True, compute_stats=False,
            silence_warnings=True
        )
        c_conf = (ds.confidence + 1) / 2 if probability_correct else ds.confidence

        if separate_by_category:
            counts0_ = [np.histogram(c_conf[s][np.sign(ds.stimuli[s]) == -1], bins=bins)[0] for s in range(nsubjects)]
            counts1_ = [np.histogram(c_conf[s][np.sign(ds.stimuli[s]) == 1], bins=bins)[0] for s in range(nsubjects)]
            counts0 = np.array([(counts0_[s] / (2*_nsamples if model_only else counts0_[s].sum())) * (1 if model_only else (stimuli < 0).sum()) for s in range(nsubjects)])
            counts1 = np.array([(counts1_[s] / (2*_nsamples if model_only else counts1_[s].sum())) * (1 if model_only else (stimuli > 0).sum()) for s in range(nsubjects)])
            plt.errorbar(
                bins[:-1] + np.diff(bins) / 2 - 0.04, counts0.mean(axis=0),
                yerr=np.percentile(counts0, 97.5, axis=0) - np.percentile(counts0, 2.5, axis=0),
                mew=2, elinewidth=2, capsize=7, capthick=2, fmt='o', markersize=8, mec=color_model, mfc=(0.4, 0.4, 0.4),
                color=color_model, lw=2, label='Model prediction ($S^-$)'
            )
            plt.errorbar(
                bins[:-1] + np.diff(bins) / 2 + 0.04, counts1.mean(axis=0),
                yerr=np.percentile(counts1, 97.5, axis=0) - np.percentile(counts1, 2.5, axis=0),
                mew=2, elinewidth=2, capsize=7, capthick=2, fmt='o', markersize=8, mec=color_model, mfc=(0.8, 0.8, 0.8),
                color=color_model, lw=2, label='Model prediction ($S^+$)'
            )
        elif separate_by_accuracy:
            acc = [np.sign(ds.stimuli[s]) == np.sign(ds.choices[s] - 0.5) for s in range(nsubjects)]
            counts_inc_ = [np.histogram(c_conf[s][~acc[s]], bins=bins)[0] for s in range(nsubjects)]
            counts_cor_ = [np.histogram(c_conf[s][acc[s]], bins=bins)[0] for s in range(nsubjects)]
            counts_inc = np.array([(counts_inc_[s] / (2*_nsamples if model_only else counts_inc_[s].sum())) * (1 if model_only else (~accuracy).sum())for s in range(nsubjects)])
            counts_cor = np.array([(counts_cor_[s] / (2*_nsamples if model_only else counts_cor_[s].sum())) * (1 if model_only else accuracy.sum()) for s in range(nsubjects)])
            plt.errorbar(
                bins[:-1] + np.diff(bins) / 2 - 0.04, counts_inc.mean(axis=0),
                yerr=np.percentile(counts_inc, 97.5, axis=0) - np.percentile(counts_inc, 2.5, axis=0),
                mew=2, elinewidth=2, capsize=7, capthick=2, fmt='o', markersize=8, mec=color_model, mfc=color_inc,
                color=color_model, lw=2, label='Model prediction\n(incorrect)'
            )
            plt.errorbar(
                bins[:-1] + np.diff(bins) / 2 + 0.04, counts_cor.mean(axis=0),
                yerr=np.percentile(counts_cor, 97.5, axis=0) - np.percentile(counts_cor, 2.5, axis=0),
                mew=2, elinewidth=2, capsize=7, capthick=2, fmt='o', markersize=8, mec=color_model, mfc=color_cor,
                color=color_model, lw=2, label='Model prediction\n(correct)'
            )
        else:
            counts_ = [np.histogram(c_conf[s], bins=bins)[0] for s in range(nsubjects)]
            counts = np.array([(counts_[s] / (2*_nsamples)) * (1 if model_only else len(confidence)) for s in range(nsubjects)])
            plt.errorbar(
                bins[:-1] + np.diff(bins) / 2, counts.mean(axis=0),
                yerr=np.percentile(counts, 97.5, axis=0) - np.percentile(counts, 2.5, axis=0),
                mew=2, elinewidth=2, capsize=7, capthick=2, fmt='o', markersize=8, mec=color_model, mfc=(0.8, 0.8, 0.8),
                color=color_model, lw=2, label='Model prediction'
            )


    plt.xticks(bins)
    plt.xlabel('Confidence')
    plt.ylabel('Probability' if model_only else 'Count')
    plt.xlim(0, 1)

    if model_prediction:
        anot_type2 = []
        cfg = None  # keep in case cfg is implemented in the future
        for i, (k, v) in enumerate(params.items()):
            if k.startswith('type2_'):
                if listlike(v):
                    val = ', '.join([fmp(p) for p in v])
                    anot_type2 += [f"${symbols[k][1:-1]}=" + f"[{val}]$"]
                else:
                    anot_type2 += [f"${symbols[k][1:-1]}={fmp(v)}$"]
        plt.text(1.055, 0.1-0.2*(separate_by_accuracy | separate_by_category), r'Type 2 parameters:' + '\n' + '\n'.join(anot_type2), transform=ax.transAxes,
                 bbox=dict(fc=[1, 1, 1], ec=[0.5, 0.5, 0.5], lw=1, pad=5), fontsize=11)

    # plt.legend(bbox_to_anchor=(1.01, 1), loc="upper left", fontsize=11, handlelength=1)
    ax_leg.legend(*ax.get_legend_handles_labels(), loc="upper left", fontsize=11, handlelength=1)

    set_fontsize(label='default', tick='default')

    if path_export is not None:
        plt.savefig(path_export, bbox_inches='tight', pad_inches=0.02)