-
Notifications
You must be signed in to change notification settings - Fork 1
/
regressions possible.tex
701 lines (584 loc) · 37.9 KB
/
regressions possible.tex
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
%% LyX 2.3.2 created this file. For more info, see http://www.lyx.org/.
%% Do not edit unless you really know what you are doing.
\documentclass[french]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{float}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{esint}
\makeatletter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% LyX specific LaTeX commands.
%% Because html converters don't know tabularnewline
\providecommand{\tabularnewline}{\\}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% User specified LaTeX commands.
\newcommand\independent{\protect\mathpalette{\protect\independenT}{\perp}}
\def\independenT#1#2{\mathrel{\rlap{$#1#2$}\mkern2mu{#1#2}}}
\makeatother
\usepackage{babel}
\makeatletter
\addto\extrasfrench{%
\providecommand{\og}{\leavevmode\flqq~}%
\providecommand{\fg}{\ifdim\lastskip>\z@\unskip\fi~\frqq}%
}
\makeatother
\begin{document}
\title{Régressions possibles}
\maketitle
\section{Termes}
\subsection{Variables}
\begin{tabular}{c|ccc}
gain & subjective & truth & estimation\tabularnewline
\hline
numeric & $g$ & $\gamma$ & $\widehat{\gamma}$\tabularnewline
binary & $\dot{G}$ ($g>0$), $G$ ($g\geq0$) & $\Gamma$ & $\widehat{\Gamma}$\tabularnewline
\end{tabular}
\begin{tabular}{c|c|c|c|c|c|c|c}
step & \multicolumn{3}{c|}{partial tax: \emph{p }(non compensated: $\mathring{p}$)} & \emph{ex ante} & \multicolumn{2}{c|}{after knowledge: \emph{K}} & after targeting\tabularnewline
\hline
variants & transport & housing & VAT & $-$ & progressivity & feedback & $-$\tabularnewline
\hline
exponent & \emph{T }($\mathring{T}$) & \emph{H }($\mathring{H}$) & \emph{V }($\mathring{V}$) & \emph{I} & \emph{P} & \emph{F} & \emph{C}\tabularnewline
\end{tabular}
\begin{itemize}
\item $L$: $\left[-2;+2\right]$ loss following partial tax
\item $g,G,\gamma,\Gamma,\widehat{\gamma},\widehat{\Gamma}$: gain from
a reform
\item $a/\dot{A}/A$: approval category (Yes/No/PNR) / approval (Yes) /
acceptance (Yes or PNR) of a reform
\item $\pi$: /20/30/40/50/70/ income category of the respondent and their
household
\item $R$ (resp. $R_{2}$): income of the respondent (resp. of their partner)
\item $T$ (resp. $T_{2}$): dummy that the respondent (resp. their partner)
receives a payment in the tax with targeting ($T=\left(R<c\right)$)
\item $\Theta$: payment received as compensation from the tax
\item $\mathbf{C}$: vector of controls
\item $\mathbf{E}$: energetic characteristics
\item $e$ (resp. $\overline{e}$): subjective elasticity of the respondent
(resp. of French people)
\item $\Delta E$: increase in spending: $\Delta E=d\left(\mathbf{E}\right)\cdot\Delta\tau\cdot\left(1-\varepsilon\right)$
\item $\Delta X^{v}=X^{v}-X$ for $X\in\left\{ A;G\right\} $ and $v\in\left\{ C;F;P\right\} $
\item $U$: update in the appropriate direction
\end{itemize}
%
\begin{itemize}
\item $L^{\mathring{T}}$/$L^{\mathring{L}}$ perte\_relative\_partielle:
$\left[-2;+2\right]$ suite à hausse taxe partielle
\item $G^{p}$ gagnant\_partielle\_categorie: G/N/P suite à hausse taxe
partielle compensée
\item $G$ gagnant\_categorie: G/N/P suite à hausse taxe compensée
\item $g$ gain: $\left[-6;+5\right]$ suite à hausse taxe compensée
\item $A^{I}$ taxe\_approbation: Oui/Non/NSP approbation hausse taxe compensée
avant info (\emph{I} pour initial ou ignorant)
\item $P$ progressivite: Oui/Non/NSP hausse taxe compensée avantagerait
les plus modestes (seulement pour apres\_modifs)
\item $\widehat{\Gamma}$ simule\_gagnant: ménage simulé gagnant avec 5
chances sur 6 suite à hausse taxe compensée
\item $\widehat{\gamma}$ simule\_gain: gain simulé du ménage suite à hausse
taxe compensée
\item $G^{P}$/$G^{F}$: gagnant\_{[}progressif/feedback{]}\_categorie:
G/N/P suite à hausse taxe compensée et à affichage de l'info sur la
progressivité $I^{P}$ / simule\_gagnant $\widehat{\Gamma}$
\item $\widehat{\Gamma^{C}}$ simule\_gain\_cible: gain simulé du ménage
suite à hausse taxe compensée
\item $A^{P}$/$A^{F}$ taxe\_{[}progressif/feedback{]}\_approbation: Oui/Non/NSP
approbation hausse taxe compensée après info progressivité et/ou simule\_gagnant
($A^{r}$ pour renseigné)
\item $\pi$ categorie\_cible: /20/30/40/50/70/ catégorie de revenus du
répondant et de son ménage
\item $T$ (resp. $T_{2}$) traite\_cible: indicatrice que le répondant
(resp. son conjoint) reçoit un versement dans la taxe avec compensation
ciblée ($T=\left(R<c\right)$)
\item $\Theta$ (resp. $\Theta^{C}$) versement: versement reçu comme compensation
de la taxe
\item $G^{C}$ gagnant\_cible\_categorie: G/N/P suite à hausse taxe avec
compensation ciblée
\item $A^{C}$ taxe\_cible\_approbation: Oui/Non/NSP approbation hausse
taxe avec compensation ciblée
\item $R$ (resp. $R_{2}$): revenu (resp. revenu du conjoint)
\item $\mathbf{C}$: vecteur de contrôles
\item $\mathbf{E}$: caractéristiques énergétiques
\item $e$ (resp. $\overline{e}$): élasticité perso (resp. moyenne)
\item $\Delta E$ (resp. $\widehat{\Delta E}$): hausse des dépenses (resp.
estimées): $\Delta E=d\left(\mathbf{E}\right)\cdot\Delta\tau\cdot\left(1-e\right)$
\item $\Delta A^{v}=A^{v}-A^{I}$ pour $v\in\left\{ C;F;P\right\} $
\item $\Delta G^{v}=G^{v}-G$ pour $v\in\left\{ C;F\right\} $
\item \emph{U}: update\_correct vaut +1 si le répondant adopte le feedback
qui infirme sa croyance initiale, $-$1 s'il update contre le feedback
qui pourtant le confirme, 0 s'il n'update pas
\end{itemize}
\subsection{Réformes}
\begin{itemize}
\item $V$ hausse TVA
\item $\mathring{p}:\mathring{T},\mathring{L}$ hausse taxe partielle
\item $p:T,L$ hausse taxe partielle compensée
\item $\textrm{Ø}$ hausse taxe compensée
\item $C$ hausse taxe avec compensation ciblée (20/30/40/50 percentiles)
\end{itemize}
\subsection{Traitements}
\begin{itemize}
\item $p:T,L$ variante\_partielle: fuel ou chauffage
\item $S$ apres\_modifs: 2è moitié: rajout de questions et d'information
sur la progressivité
\item $r:F,P$ variante\_feedback: f/p: feedback (2/3) / progressivité (1/3)
\item $I^{P}$ info\_progressivite: info sur la progressivité ((variante:
progressivité) ou (apres\_modifs et variante: feedback et variante\_progressivite:
fb\_info))
\item $c$ cible $\lessgtr$ $\pi$ categorie\_cible: cible attribuée aléatoirement
comme max ou min de categorie\_cible (sauf pour categorie\_cible=>70)
\end{itemize}
\section{Intérêt personnel}
\subsection{Gain subjectif avec ciblage}
\[
A_{i}^{C}=\delta_{0}+\beta_{G}G_{i}^{C}+\delta_{A}A_{i}^{I}+\delta_{GA}G_{i}^{C}A_{i}^{I}+\epsilon_{i}
\]
\[
A_{i}^{C}=.04+.45^{***}G_{i}^{C}+.38^{***}A_{i}^{I}+.01G_{i}^{C}A_{i}^{I}+\epsilon_{i}
\]
\begin{table}[H]
\centering%
\begin{tabular}{c|cc|c}
$A^{C}$ & $G^{C}$ & $\neg G^{C}$ & total\tabularnewline
\hline
$A^{I}$ & .89 & .43 & .67\tabularnewline
$\neg A^{I}$ & .49 & .04 & .17\tabularnewline
\hline
total & .66 & .12 & .32\tabularnewline
\end{tabular}
\caption{Acceptance rate for targeted reform in function of \emph{ex ante}
acceptance and perceived personal benefit (non weighted)}
\end{table}
\subsection{Discontinuité du transfert ciblé}
\[
A_{i}^{C}=\delta_{0}+\beta_{T}T_{i}+\beta_{2}T_{2,i}+\beta_{T2}T_{i}\cdot T_{2,i}+\epsilon_{i}
\]
\[
A_{i}^{C}=\left\{ \begin{array}{c}
.31\\
.23\\
.20
\end{array}\right.+\left\{ \begin{array}{c}
.17^{**}\\
.18^{**}\\
.15^{*}
\end{array}\right.T_{i}+\left\{ \begin{array}{c}
.07\\
.13^{.}\\
.16^{*}
\end{array}\right.T_{2,i}+\left\{ \begin{array}{c}
-.01\\
-.07\\
-.11
\end{array}\right.T_{i}\cdot T_{2,i}+\epsilon_{i}\text{ for }\left\{ \begin{array}{c}
\pi_{i}=\left[20;30\right]\\
\pi_{i}=\left[30;40\right]\\
\pi_{i}=\left[40;50\right]
\end{array}\right.
\]
\[
A_{i}^{C}=\left\{ \begin{array}{c}
.31\\
.24\\
.22
\end{array}\right.+\left\{ \begin{array}{c}
.17^{**}\\
.15^{**}\\
.10^{*}
\end{array}\right.T_{i}+\left\{ \begin{array}{c}
.07\\
.09^{.}\\
.10^{*}
\end{array}\right.T_{2,i}+\epsilon_{i}\text{ for }\left\{ \begin{array}{c}
\pi_{i}=\left[20;30\right]\\
\pi_{i}=\left[30;40\right]\\
\pi_{i}=\left[40;50\right]
\end{array}\right.
\]
\[
A_{i}^{C}=\delta_{0}+.14^{***}T_{i}+.06^{**}T_{2,i}+\beta_{T2}T_{i}\cdot T_{2,i}+\delta_{C}C_{i}+\delta_{a}^{***}a_{i}^{I}+\boldsymbol{\delta_{R}R_{i}}+\epsilon_{i}
\]
\[
A_{i}^{C}=.31+.17^{**}T_{i}+.07\cdot T_{2,i}+\epsilon_{i}
\]
\subsection{Discontinuité instrumentée}
\begin{align*}
G_{i}^{C} & =\alpha_{0}+\alpha_{T}T_{i}+\alpha_{2}T_{2,i}+\gamma_{A}A_{i}^{I}\left(+\gamma_{R}R_{i}\right)+\eta_{i}\\
A_{i}^{C} & =\delta_{0}+\beta_{G}\widehat{G_{i}^{C}}\left(+\sum_{c}\beta_{c}\mathbf{1}_{c_{i}=c}+\beta_{G\cdot c}\widehat{G_{i}^{C}}\mathbf{1}_{c_{i}=c}\right)+\delta_{A}A_{i}^{I}\left(+\delta_{R}R_{i}\right)+\epsilon_{i}
\end{align*}
\begin{align*}
G_{i}^{C} & =.18+.21^{***}T_{i}+.13^{***}T_{2,i}+.25^{***}A_{i}^{I}+\eta_{i}\\
A_{i}^{C} & =-.02+.65^{***}\widehat{G_{i}^{C}}+.34^{***}A_{i}^{I}+\epsilon_{i}
\end{align*}
\subsection{Discontinuité d'un feedback positif}
\[
A_{i}^{F}=\delta_{0}+\beta_{\Gamma}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\delta_{a}a_{i}^{I}+\epsilon_{i}
\]
\[
A_{i}^{F}=\delta_{0}+.08^{***}\widehat{\Gamma_{i}}-6\cdot10^{-5\,*}\widehat{\gamma_{i}}-3\cdot10^{-9\,*}\widehat{\gamma_{i}}^{2}+\delta_{a}a_{i}^{I}+\epsilon_{i}
\]
Effet similaire sur la variation d'approbation:
\[
\Delta A_{i}^{F}=\alpha_{0}+\beta_{\Gamma}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\[
\Delta A_{i}^{F}=\alpha_{0}+.11^{***}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
En instrumentant:
\[
G_{i}^{F}=\alpha_{0}+.21^{***}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\[
A_{i}^{F}=\alpha_{0}+.40^{***}\widehat{G_{i}^{F}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\subsection{Simulation comme instrument (à travers $G^{F}$)}
introduire $A^{I}$ produit un effet
\begin{align*}
G_{i}^{F} & =\alpha_{0}+\beta_{\Gamma}\widehat{\Gamma_{i}}+\gamma_{A}A_{i}^{I}+\gamma_{R}R_{i}+\eta_{i}\\
A_{i}^{F} & =\delta_{0}+\beta_{G}\widehat{G_{i}^{F}}+\delta_{A}A_{i}^{I}+\delta_{R}R_{i}+\epsilon_{i}
\end{align*}
\begin{align*}
G_{i}^{F} & =\alpha_{0}+\beta_{\Gamma}\widehat{\Gamma_{i}}+\gamma_{A}A_{i}^{I}+\gamma_{R}R_{i}+\eta_{i}\\
A_{i}^{F} & =\delta_{0}+.48^{***}\widehat{G_{i}^{F}}+\delta_{A}A_{i}^{I}+\delta_{R}R_{i}+\epsilon_{i}
\end{align*}
\subsection{Simulation comme instrument (à travers $\Delta G^{F}$)}
introduire $A^{I}$ produit un effet
\begin{align*}
\Delta G_{i}^{F} & =\alpha_{0}+\widehat{\Gamma_{i}}+\gamma_{A}A_{i}^{I}+\gamma_{R}R_{i}+\eta_{i}\\
A_{i}^{F} & =\delta_{0}+\beta_{G}\widehat{\Delta G_{i}^{F}}+\delta_{A}A_{i}^{I}+\delta_{R}R_{i}+\epsilon_{i}
\end{align*}
\begin{align*}
\Delta G_{i}^{F} & =\alpha_{0}+\widehat{\Gamma_{i}}+\gamma_{A}A_{i}^{I}+\gamma_{R}R_{i}+\eta_{i}\\
A_{i}^{F} & =\delta_{0}+.65^{***}\widehat{\Delta G_{i}^{F}}+\delta_{A}A_{i}^{I}+\delta_{R}R_{i}+\epsilon_{i}
\end{align*}
TODO: quels contrôles?
\section{Persistance et biais des croyances}
\subsection{Persistance après la simulation}
\[
\Delta G_{i}^{F}=\alpha_{0}+\beta_{\Gamma}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\[
\Delta G_{i}^{F}=-.08+.23^{***}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\subsection{Simulation comme instrument}
\[
\Delta G_{i}^{F}=\alpha_{0}+\beta_{\Gamma}\widehat{\Gamma_{i}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\[
\Delta A_{i}^{F}=\alpha_{0}+.51^{***}\widehat{\Delta G_{i}^{F}}+\delta_{\gamma}\widehat{\gamma_{i}}+\delta_{\gamma^{2}}\widehat{\gamma_{i}}^{2}+\epsilon_{i}
\]
\subsection{Biais d'update à la perte}
\[
U_{i}=\delta_{0}+\beta_{\bar{G}}\bar{G}_{i}+\epsilon_{i}\,|\,\widehat{\Gamma_{i}}\neq G_{i}
\]
Among those who receive a feedback contradicting their subjective
gain, those who felt they were winners are 59 p.p. more likely to
correctly update their winning category (i.e. downwards).
\[
U_{i}=\delta_{0}+.59^{***}\bar{G}_{i}+\epsilon_{i}\,|\,\widehat{\Gamma_{i}}\neq G_{i}
\]
\subsection{Biais à la perte}
Among those who should have updated, those who revised their subjective
gain upwards were disproportionately more often correct in doing so
than those who revised it downwards, indicating a bias towards loss
when updating.
TODO: check how non affectés are managed; include those who shouldn't
update in the analysis.
\[
U_{i}=\alpha_{0}+.92^{***}G_{i}^{F}+\epsilon_{i}\,|\,\widehat{\Gamma_{i}}\neq G_{i}
\]
\subsection{Biais à la perte}
Those who felt they were winners update incorrectly disproportionately
often compared to those who felt they were losers, indicating a bias
towards loss when updating.
\[
U_{i}=\alpha_{0}-.26^{***}G_{i}+\epsilon_{i}
\]
\subsection{Pas de biais à la perte}
6\% of those who felt unaffected start approving the reform after
the feedback, and learning to be a winner does not increase this figure
significantly.
TODO: rephrase \og start disapproving\fg{} because it can be \og stop
disapproving\fg .
\[
\Delta A_{i}^{F}=.05+.02\widehat{\Gamma_{i}}+\epsilon_{i}
\]
\subsection{Biais de confirmation}
\[
\Delta A_{i}^{F}=\delta_{0}+\beta_{j}G_{i}+\epsilon_{i}\,|\,\widehat{\Gamma_{i}}=j
\]
Among winners, those who felt they were winners are 6 p.p. more likely
to start approving the reform than the others; while among losers,
those who already felt they were losers are 6 p.p. \emph{less} likely
to start approving it. This looks like a confirmation bias, where
hesitant winners (resp. losers) may gain confidence that they win
(resp. lose) through the feedback, triggering them to approve (resp.
not to approve) the reform more than those who learn that they gain
(resp. lose) from the reform.
\[
\Delta\bar{A}_{i}^{F}=\delta_{0}+.06^{**}\bar{G}_{i}+\epsilon_{i}\,|\,\widehat{\Gamma_{i}}=1
\]
\[
\Delta\bar{A}_{i}^{F}=\delta_{0}+.06^{.}G_{i}+\epsilon_{i}\,|\,\widehat{\Gamma_{i}}=0
\]
\section{Modèle adaptatif bayésien}
On fait l'hypothèse que
\[
\mathbb{P}_{i,t}\left(G_{i}>0\,|\,\mathbf{E}_{i}\right)=f\left(\underset{+}{\mathbb{E}\left[g_{i}\,|\,\mathbf{E}_{i}\right]}\right)=f\left(\gamma_{i}-b_{B\left(i\right)}\right)
\]
où $B\left(i\right)$ est la catégorie du répondant \emph{i, }et on
estime \emph{f}. On a le gain subjectif $g_{i}=\gamma_{i}-b_{B\left(i\right)}+\epsilon_{i}$
où l'erreur de l'individu \emph{i} par rapport au gain objectif $\gamma_{i}$
est $-b_{B\left(i\right)}+\epsilon_{i}$, avec $\mathbb{E}\left[\epsilon_{i}\right]=0$
(et en espérant que $\mathbb{E}\left[\epsilon_{i}\,|\,\Delta\widehat{E_{i}}\right]=0$).
(On pourrait faire dépendre \emph{$b_{B\left(i\right)}$ }de caractéristiques
observables dans une extension). On peut estimer le biais \emph{$b_{B\left(i\right)}$
}directement.
\begin{align*}
g_{i} & =\underset{\gamma_{i}}{\underbrace{110-\Delta E_{i}}}-b_{B\left(i\right)}+\epsilon_{i}\\
& =\underset{\widehat{\gamma_{i}}}{\underbrace{110-\Delta\widehat{E_{i}}}}+\iota_{i}-b_{B\left(i\right)}+\epsilon_{i}
\end{align*}
Le répondant sait qu'il commet une erreur $b_{B\left(i\right)}-\epsilon_{i}$,
qu'il croit d'espérance nulle (il pense que $b_{B\left(i\right)}=0$
\emph{a priori}). Il croit que nous estimons son gain à $\widehat{\gamma_{i}}+\epsilon_{i}=\gamma_{i}-\iota_{i}+\epsilon_{i}$,
et que nous commettons une erreur d'espérance $b_{B\left(i\right)}-\iota_{i}$
\emph{a priori }non nulle, où $\iota_{i}-b_{B\left(i\right)}$ est
son information partielle (liée à ses caractéristiques inobservées
par nous). On peut décomposer $b_{B\left(i\right)}-\iota_{i}$ entre
son biais $b_{B\left(i\right)}$ et notre erreur $\iota_{i}$ (dont
on peut estimer l'espérance dans une extension pour estimer \emph{b}
plus précisément). Le répondant sait qu'il peut être biaisé. Il sait
que l'écart $g_{i}-\widehat{\gamma_{i}}$ entre son estimation et
la nôtre est de $\iota_{i}-b_{B\left(i\right)}+\epsilon_{i}$, mais
ne sait pas quelle part de cet écart est dû à son information partielle
$\iota_{i}-b_{B\left(i\right)}$, et quelle part est due à son incertitude
$\epsilon_{i}$. L'incertitude du répondant $\epsilon_{i}$ est une
variable aléatoire centrée qui l'empêche de distinguer son biais \emph{$b_{i}$}
de notre erreur $\iota_{i}$.
Le feedback lui donne une information sur $\widehat{\gamma_{i}}$,
donc indirectement sur $\epsilon_{i}$. Après le feedback, il va réviser
son gain subjectif en
\[
g_{i}^{F}=\widehat{\gamma_{i}}+\iota_{i}-\left(\alpha_{B\left(i\right)}+\eta_{i}\right)b_{B\left(i\right)}+\epsilon_{i}
\]
$\alpha_{B\left(i\right)}\in\left[0;1\right]$ ssi les répondants dans
$B\left(i\right)$ révisent dans le bon sens. (On pourrait rendre
le nouveau bruit $\eta_{i}b_{B\left(i\right)}$ indépendant de $b_{B\left(i\right)}$,
à voir\emph{ }en fonction des données).
$g_{i}^{F}$ n'est pas observée, mais on peut l'estimer à partir de
l'estimation de \emph{f} (et \emph{en regroupant des individus similaires}):
\[
\widehat{g_{B\left(i\right)}^{F}}=\widehat{f}^{-1}\left(\mathbb{P}_{i,t+1}\left(G_{i}^{F}>0\right)\right)
\]
Le paramètre qui nous intéresse est $\alpha_{B}$, car il représente
l'ampleur de la révision effectuée par les répondants. On l'estime
en utilisant $g_{i}-g_{i}^{F}=b_{B\left(i\right)}\left(\alpha_{B\left(i\right)}+\eta_{i}-1\right)$
et en prenant la médiane de chaque terme:
\begin{align*}
\widehat{\alpha_{B}} & =1+\frac{g_{B}-\widehat{g_{B\left(i\right)}^{F}}}{b_{B}}
\end{align*}
où $g_{B}$ est la médiane des répondants dans la catégorie \emph{B}.
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
Tentative de cadre bayésien :
From the previous tables it appears that respondents do not fully
update their beliefs once they are given our feedback. We propose
a model to demonstrate that a majority of them do not follow Bayesian
rationality in their updating process. Let us assume that respondent
\emph{i} is uncertain about their actual gain and evaluates its distribution
to be $\mathcal{N}\left(g_{i},\sigma_{i}^{2}\right)$, where $\sigma_{i}$
is their uncertainty. This respondent \emph{believes} that our evaluation
of their gain follows the law $\widehat{\gamma_{i}}\,|\,\mathbf{E}_{i}\sim\mathcal{N}\left(g_{i}+b_{i},\sigma_{\gamma}^{2}\right)$,
where $b_{i}$ is our bias and $\sigma_{\gamma}^{2}$ is such that
we ---the surveyors--- make a correct feedback in 5 out of 6 cases:
$\mathbb{P}\left(\text{sgn}\left(\gamma_{i}\right)=\text{sgn}\left(\text{\ensuremath{\widehat{\gamma}}}_{i}\right)\right)=\frac{5}{6}$.
Hence, in their Bayesian reasoning, respondent \emph{i} believes that
$\mathbb{P}_{i}\left(\widehat{\gamma_{i}}<0\,|\,\mathbf{E}_{i}\right)=\Phi_{g_{i}+b_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)$.\footnote{Following usual convention, we denote $\Phi_{\mu,\sigma^{2}}$ the
c.d.f. of the normal distribution of mean $\mu$ and variance $\sigma^{2}$,
and $\Phi$ the c.d.f. of the standard normal distribution.} Denoting $\mathbb{P}_{i,0}$ (resp. $\mathbb{P}_{i,1}$) the subjective
probability measure of respondent \emph{i} before (resp. after) the
feedback, we have by Bayes' rule:
{[}quick computations :{]}
\begin{align}
\mathbb{P}_{i,1}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right) & =\mathbb{P}_{i,0}\left(\gamma_{i}<0\,|\,\mathbf{E}_{i}\right)\frac{\mathbb{P}_{i,0}\left(\widehat{\gamma_{i}}<0\,|\,\gamma_{i}<0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,0}\left(\widehat{\gamma_{i}}<0\,|\,\mathbf{E}_{i}\right)}\nonumber \\
& =\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)\frac{5/6}{\Phi_{g_{i}+b_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\nonumber \\
& =\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sqrt{\sigma_{i}^{2}}}\right)}{\Phi\left(-\frac{g_{i}+b_{i}}{\sqrt{\sigma_{\gamma_{i}}^{2}}}\right)}\label{eq:Bayes-nv}
\end{align}
and
\begin{align*}
\mathbb{P}_{i,1}\left(\gamma_{i}>0\,|\,\widehat{\gamma_{i}}>0,\mathbf{E}_{i}\right) & =\ldots=\frac{5}{6}\frac{1-\Phi\left(-\frac{g_{i}}{\sqrt{\sigma_{i}^{2}}}\right)}{1-\Phi\left(-\frac{g_{i}+b_{i}}{\sqrt{\sigma_{\gamma_{i}}^{2}}}\right)}
\end{align*}
{[}detailed computations :{]}
\begin{align*}
\mathbb{P}_{i,1}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right) & =\mathbb{P}_{i,0}\left(\gamma_{i}<0\,|\,\mathbf{E}_{i}\right)\frac{\mathbb{P}_{i,0}\left(\widehat{\gamma_{i}}<0\,|\,\gamma_{i}<0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,0}\left(\widehat{\gamma_{i}}<0\,|\,\mathbf{E}_{i}\right)}\\
& =\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)\frac{5/6}{\Phi_{g_{i}+b_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\\
& =\frac{5}{6}\frac{\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)}{\Phi_{g_{i}+b_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\intop_{-\infty}^{0}e^{-\frac{\left(x-g_{i}\right)^{2}}{2\sigma_{i}^{2}}}dx}{\intop_{-\infty}^{0}e^{-\frac{\left(x-g_{i}-b_{i}\right)^{2}}{2\sigma_{\gamma_{i}}^{2}}}dx}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\intop_{-\infty}^{-g_{i}}e^{-\frac{u^{2}}{2\sigma_{i}^{2}}}du}{\intop_{-\infty}^{-g_{i}-b_{i}}e^{-\frac{u^{2}}{2\sigma_{\gamma_{i}}^{2}}}du}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\sigma_{i}}{\sigma_{\gamma_{i}}}\frac{\intop_{-\infty}^{-\frac{g_{i}}{\sigma_{i}}}e^{-\frac{v^{2}}{2}}dv}{\intop_{-\infty}^{-\frac{g_{i}+b_{i}}{\sigma_{\gamma_{i}}}}e^{-\frac{v^{2}}{2}}dv}\\
& =\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sigma_{i}}\right)}{\Phi\left(-\frac{g_{i}+b_{i}}{\sigma_{\gamma_{i}}}\right)}
\end{align*}
In this model, we have made the following assumptions: respondent
\emph{i} is Bayesian; s-he has a normal prior which s-he thinks is
unbiased; s-he thinks that our estimation of their gain is normally
distributed with a certain bias (possibly nil) and a certain variance;
and s-he believes our claim to provide a correct feedback in 5 out
of 6 cases. In order to go one step further, we need to make an additional
assumption on the value of the respondents' uncertainty $\sigma_{i}^{2}$.
We classify respondents into bins of similar estimated gain; we denote
$B\left(i\right)$ the bin of respondent \emph{i}, and we impose that
each bin \emph{B} is homogeneous in the sign of the gain: $\forall i,j,B\left(i\right)=B\left(j\right)=B\implies\widehat{\Gamma_{B}}:=\widehat{\Gamma_{i}}=\widehat{\Gamma_{j}}$\emph{.}
We assume that respondents with similar gain do not differ in their
uncertainty $\sigma_{B\left(i\right)}^{2}$, somewhat known to them.
Thus, we can see the answers of respondents in bin \emph{$B\left(i\right)$
}as different realizations of the same normal distribution with variance
$\sigma_{B\left(i\right)}^{2}$, and we identify $\sigma_{B\left(i\right)}^{2}$
with the sample variance of gain for respondents in bin \emph{$B\left(i\right)$}.
Similarly, the share of respondents in bin $B\left(i\right)$ whose
winning category after feedback coincides with our feedback, can be
identified with $\mathbb{P}_{i,1}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right)$
(or $\mathbb{P}_{i,1}\left(\gamma_{i}>0\,|\,\widehat{\gamma_{i}}>0,\mathbf{E}_{i}\right)$,
depending on the bin of estimated gain): $\mathbb{P}\left(G_{i}^{F}<0\,|\,\widehat{\gamma_{i}}<0,\,B\left(i\right)=B\right)\overset{\text{hyp.}}{=}\mathbb{P}_{i,1}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right)$.
With these additional assumptions, we can retrieve from equation (\ref{eq:Bayes-nv}):
\begin{equation}
\phi_{i}:=\Phi\left(-\frac{g_{i}+b_{i}}{\sqrt{\sigma_{\gamma_{i}}^{2}}}\right)=\begin{cases}
\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sqrt{\sigma_{B\left(i\right)}^{2}}}\right)}{\mathbb{P}\left(G_{i}^{F}<0\,|\,\widehat{\gamma_{i}}<0,\,B\left(i\right)=B\right)} & \text{for \emph{B} s.t. }\widehat{\Gamma_{B}}<0\\
1-\frac{5}{6}\frac{1-\Phi\left(-\frac{g_{i}}{\sqrt{\sigma_{B\left(i\right)}^{2}}}\right)}{\mathbb{P}\left(G_{i}^{F}>0\,|\,\widehat{\gamma_{i}}>0,\,B\left(i\right)=B\right)} & \text{for \emph{B} s.t. }\widehat{\Gamma_{B}}>0
\end{cases}\label{eq:Bayes-phi}
\end{equation}
As $\phi_{i}$ must belong to $\left[0;1\right]$, we should conclude
that one of our assumption is false when $\phi_{i}\notin\left[0;1\right]$.
We think that the least credible assumption is that respondent \emph{i}
is Bayesian, and hence interpret $\phi_{i}\notin\left[0;1\right]$
as evidence that respondent \emph{i} is not Bayesian.
Forming 8 bins of equal size (including two with negative gains)
defined using quantiles of simulated gains, we observe that for every
bin \emph{B}, $\sqrt{\sigma_{B}^{2}}\in\left[112;148\right]$.\footnote{Actually, $\sqrt{\sigma_{B}^{2}}=148$ is an outlier and corresponds
to the bin of lowest gains. All other standard deviations are in $\left[112;125\right]$.} Taking a strict definition of agreeing with our feedback (i.e. excluding
those who think they are unaffected from this definition), we find
that 73\% of the respondents are not Bayesians. Taking a loose definition,
there are still 37\%. As this tests for \emph{not }having a Bayesian
reasoning, failing to flag a respondent as non Bayesian does not mean
that s-he has a Bayesian reasoning, because the only way to reconcile
this respondent with Bayesianism is generally to attribute them an
extreme prior. To see this, we can make one last assumption: that
respondents believe that we ---the surveyors--- are unbiased: $b_{i}=0$.
Using this assumption, we can now identify $\sqrt{\sigma_{\gamma_{i}}^{2}}$
from equation (\ref{eq:Bayes-phi}): $\sqrt{\sigma_{\gamma_{i}}^{2}}=-\frac{g_{i}}{\Phi^{-1}\left(\phi_{i}\right)}$.
Among those for whom we failed to reject Bayesianism with the previous
test, 10\% obtain a negative value for $\sqrt{\sigma_{\gamma_{i}}^{2}}$
(14\% if we take the loose definition above), which is another indication
of non-Bayesianism.
Combining these two inconsistencies with Bayesianism, we thus find
76\% of non-Bayesians (or 46\% if we accept the loose definition).
For respondents that can be thought of as Bayesian, we find an average
$\sqrt{\sigma_{\gamma_{i}}^{2}}$ of 183 (181 in the loose definition).
The fact that $\sqrt{\sigma_{\gamma_{i}}^{2}}>\sqrt{\sigma_{B}^{2}}$
shows that Bayesian respondents thought that they knew their gain
more precisely than we did, which is consistent with their tendency
to update their beliefs only a little.
On pourrait estimer $\sigma_{i}$ en regardant la variance des estimations
pour une catégorie de gain objectif donnée. On estimerait aussi le
membre de gauche pour cette même catégorie de gain objectif. On en
déduirait alors l'écart-type $\sigma_{\gamma_{i}}$ qui rendrait cohérent
le raisonnement bayésien de l'individu \emph{i }: il peut croire que
notre écart-type est bien plus grand que le sien, i.e. que son estimation
est plus précise, ce qui justifierait de réviser peu ses croyances.
Si $\Phi\left(-\frac{g_{i}}{\sigma_{\gamma_{i}}}\right)=\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sigma_{i}}\right)}{\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right)}\notin\left[0;1\right]$,
on en déduit que l'individu \emph{i }n'est pas bayésien. Le problème
de ce modèle est qu'on exclut les biais, or l'individu \emph{i} pourrait
penser que notre estimation est biaisée. Introduisons donc un biais
$b_{i}$.
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
L'individu \emph{i }évalue la distribution de son gain à $\mathcal{N}\left(g_{i},\sigma_{i}^{2}\right)$.
Notre évaluation de son gain suit la loi $\widehat{\gamma_{i}}\,|\,\mathbf{E}_{i}\sim\mathcal{N}\left(\gamma_{i},\sigma_{\gamma_{i}}^{2}\right)$,
où $\sigma_{\gamma_{i}}^{2}$ est tel que $\mathbb{P}\left(\text{sgn}\left(\gamma_{i}\right)=\text{sgn}\left(\text{\ensuremath{\widehat{\gamma}}}_{i}\right)\right)=\frac{5}{6}$.
Ainsi, parmi les individus au gain $\gamma_{i}$, pour lesquels nous
connaissons $\mathbf{E}_{i}$, nous donnerons 5 fois sur 6 un feedback
juste. Dans le raisonnement bayésien, l'individu croit que $g_{i}=\gamma_{i}$
et donc que $\widetilde{\mathbb{P}}\left(\widehat{\gamma_{i}}<0\,|\,\mathbf{E}_{i}\right)=\Phi_{g_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)$.
\begin{align*}
\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right) & =\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\mathbf{E}_{i}\right)\frac{\widetilde{\mathbb{P}}\left(\widehat{\gamma_{i}}<0\,|\,\gamma_{i}<0,\mathbf{E}_{i}\right)}{\widetilde{\mathbb{P}}\left(\widehat{\gamma_{i}}<0\,|\,\mathbf{E}_{i}\right)}\\
& =\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)\frac{5/6}{\Phi_{g_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\\
& =\frac{5}{6}\frac{\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)}{\Phi_{g_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\intop_{-\infty}^{0}e^{-\frac{\left(x-g_{i}\right)^{2}}{2\sigma_{i}^{2}}}dx}{\intop_{-\infty}^{0}e^{-\frac{\left(x-g_{i}\right)^{2}}{2\sigma_{\gamma_{i}}^{2}}}dx}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\intop_{-\infty}^{-g_{i}}e^{-\frac{u^{2}}{2\sigma_{i}^{2}}}du}{\intop_{-\infty}^{-g_{i}}e^{-\frac{u^{2}}{2\sigma_{\gamma_{i}}^{2}}}du}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\sigma_{i}}{\sigma_{\gamma_{i}}}\frac{\intop_{-\infty}^{-\frac{g_{i}}{\sigma_{i}}}e^{-\frac{v^{2}}{2}}dv}{\intop_{-\infty}^{-\frac{g_{i}}{\sigma_{\gamma_{i}}}}e^{-\frac{v^{2}}{2}}dv}\\
& =\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sigma_{i}}\right)}{\Phi\left(-\frac{g_{i}}{\sigma_{\gamma_{i}}}\right)}
\end{align*}
On pourrait estimer $\sigma_{i}$ en regardant la variance des estimations
pour une catégorie de gain objectif donnée. On estimerait aussi le
membre de gauche pour cette même catégorie de gain objectif. On en
déduirait alors l'écart-type $\sigma_{\gamma_{i}}$ qui rendrait cohérent
le raisonnement bayésien de l'individu \emph{i }: il peut croire que
notre écart-type est bien plus grand que le sien, i.e. que son estimation
est plus précise, ce qui justifierait de réviser peu ses croyances.
Si $\Phi\left(-\frac{g_{i}}{\sigma_{\gamma_{i}}}\right)=\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sigma_{i}}\right)}{\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right)}\notin\left[0;1\right]$,
on en déduit que l'individu \emph{i }n'est pas bayésien. Le problème
de ce modèle est qu'on exclut les biais, or l'individu \emph{i} pourrait
penser que notre estimation est biaisée. Introduisons donc un biais
$\iota_{i}$.
\begin{align*}
\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right) & =\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\mathbf{E}_{i}\right)\frac{\widetilde{\mathbb{P}}\left(\widehat{\gamma_{i}}<0\,|\,\gamma_{i}<0,\mathbf{E}_{i}\right)}{\widetilde{\mathbb{P}}\left(\widehat{\gamma_{i}}<0\,|\,\mathbf{E}_{i}\right)}\\
& =\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)\frac{5/6}{\Phi_{g_{i}+\iota_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\\
& =\frac{5}{6}\frac{\Phi_{g_{i},\sigma_{i}^{2}}\left(0\right)}{\Phi_{g_{i}+\iota_{i},\sigma_{\gamma_{i}}^{2}}\left(0\right)}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\intop_{-\infty}^{0}e^{-\frac{\left(x-g_{i}\right)^{2}}{2\sigma_{i}^{2}}}dx}{\intop_{-\infty}^{0}e^{-\frac{\left(x-g_{i}-\iota_{i}\right)^{2}}{2\sigma_{\gamma_{i}}^{2}}}dx}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\intop_{-\infty}^{-g_{i}}e^{-\frac{u^{2}}{2\sigma_{i}^{2}}}du}{\intop_{-\infty}^{-g_{i}-\iota_{i}}e^{-\frac{u^{2}}{2\sigma_{\gamma_{i}}^{2}}}du}\\
& =\frac{5}{6}\frac{\sigma_{\gamma_{i}}}{\sigma_{i}}\frac{\sigma_{i}}{\sigma_{\gamma_{i}}}\frac{\intop_{-\infty}^{-\frac{g_{i}}{\sigma_{i}}}e^{-\frac{v^{2}}{2}}dv}{\intop_{-\infty}^{-\frac{g_{i}+\iota_{i}}{\sigma_{\gamma_{i}}}}e^{-\frac{v^{2}}{2}}dv}\\
& =\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sigma_{i}}\right)}{\Phi\left(-\frac{g_{i}+\iota_{i}}{\sigma_{\gamma_{i}}}\right)}
\end{align*}
On ne peut plus identifier la distribution que l'individu \emph{i}
nous attribue car il y a désormais deux inconnues : $\sigma_{\gamma_{i}}$
et $\iota_{i}$. Néanmoins, le critère reste le même pour savoir si
le répondant n'est pas bayésien : $\frac{5}{6}\frac{\Phi\left(-\frac{g_{i}}{\sigma_{i}}\right)}{\widetilde{\mathbb{P}}\left(\gamma_{i}<0\,|\,\widehat{\gamma_{i}}<0,\mathbf{E}_{i}\right)}\notin\left[0;1\right]$.
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
\begin{align*}
\mathbb{P}_{i,t+1}\left(G_{i}^{F}>0\,|\,\widehat{\Gamma_{i}}>0,\mathbf{E}_{i}\right) & =f^{F}\left(\mathbb{E}\left[g_{i}^{F}\,|\,\mathbf{E}_{i}\right]\right)\\
& =f^{F}\left(\gamma_{i}-\alpha b\right)\\
\mathbb{P}_{i,t+1}\left(G_{i}^{F}>0\,|\,\widehat{\Gamma_{i}}>0,\mathbf{E}_{i}\right) & =\frac{\mathbb{P}_{i,t}\left(G_{i}^{F}>0\,|\,\mathbf{E}_{i}\right)\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,G_{i}^{F}>0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,\mathbf{E}_{i}\right)}\\
& =\frac{f^{F}\left(\gamma_{i}-b\right)\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,G_{i}^{F}>0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,\mathbf{E}_{i}\right)}\\
& =\frac{f^{F}\left(\gamma_{i}-b\right)\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,\mathbf{E}_{i}\right)}\\
& \overset{\text{hyp.}}{=}\frac{f^{F}\left(\gamma_{i}-b\right)\mathbb{P}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,G_{i}^{F}>0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\Gamma_{i}>0\,|\,\mathbf{E}_{i}\right)}\\
\\
\widetilde{\mathbb{P}}_{i,t}\left(G_{i}>0\,|\,\mathbf{E}_{i}\right) & =f\left(g_{i}\right)\\
\widetilde{\mathbb{P}}_{i,t+1}\left(\Gamma_{i}>0\,|\,\widehat{\Gamma_{i}}>0,\mathbf{E}_{i}\right) & =\widetilde{\mathbb{P}}_{i,t}\left(\Gamma_{i}>0\,|\,\mathbf{E}_{i}\right)\frac{\widetilde{\mathbb{P}}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,\Gamma_{i}>0,\mathbf{E}_{i}\right)}{\widetilde{\mathbb{P}}_{i,t}\left(\widehat{\Gamma_{i}}>0\,|\,\mathbf{E}_{i}\right)}\\
\widetilde{\mathbb{P}}_{i,t+1}\left(\gamma_{i}>0\,|\,\widehat{\gamma_{i}}>0,\mathbf{E}_{i}\right) & =\widetilde{\mathbb{P}}_{i,t}\left(\gamma_{i}>0\,|\,\mathbf{E}_{i}\right)\frac{\widetilde{\mathbb{P}}_{i,t}\left(\widehat{\gamma_{i}}>0\,|\,\gamma_{i}>0,\mathbf{E}_{i}\right)}{\widetilde{\mathbb{P}}_{i,t}\left(\widehat{\gamma_{i}}>0\,|\,\mathbf{E}_{i}\right)}\\
\mathbb{P}_{i,t+1}\left(\gamma_{i}>0\,|\,\widehat{\gamma_{i}}+\epsilon_{i}>0,\mathbf{E}_{i}\right) & =\widetilde{\mathbb{P}}_{i,t}\left(\gamma_{i}>0\,|\,\mathbf{E}_{i}\right)\frac{\mathbb{P}_{i,t}\left(\widehat{\gamma_{i}}+\epsilon_{i}>0\,|\,\gamma_{i}>0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\widehat{\gamma_{i}}+\epsilon_{i}>0\,|\,\mathbf{E}_{i}\right)}\\
 & \overset{\text{si trust}}{=}\frac{5}{6}\frac{\mathbb{P}_{i,t}\left(\gamma_{i}+\iota_{i}+\epsilon_{i}>0\,|\,\gamma_{i}>0,\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\widehat{\gamma_{i}}+\epsilon_{i}>0\,|\,\mathbf{E}_{i}\right)}\\
& =\frac{5}{6}\frac{\mathbb{P}_{i,t}\left(\iota_{i}+\epsilon_{i}>-\gamma_{i}\,|\,0>-\gamma_{i},\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(\iota_{i}+\epsilon_{i}>-\gamma_{i}\right)}\\
\\
& =\frac{5}{6}\frac{\mathbb{P}_{i,t}\left(g_{i}>-\left(\epsilon_{i}+\iota_{i}\right)\,|\,g_{i}>-\epsilon_{i},\mathbf{E}_{i}\right)}{\mathbb{P}_{i,t}\left(g_{i}>-\left(\epsilon_{i}+\iota_{i}\right)\right)}\\
& =\frac{5}{6}\frac{\mathbb{P}_{i,t}\left(g_{i}+\iota_{i}>-\epsilon_{i}\,|\,g_{i}>-\epsilon_{i},\mathbf{E}_{i}\right)}{\Phi_{b_{i},\sigma_{\epsilon}^{2}+\sigma_{\iota}^{2}}\left(g_{i}\right)}\\
\\
g_{i} & =\gamma_{i}-b_{B\left(i\right)}+\epsilon_{i}\\
g_{i}+\iota_{i}+b_{B\left(i\right)} & =\widehat{\gamma_{i}}+\epsilon_{i}\\
& \mathbb{P}_{i,t+1}\left(g_{i}^{F}>0\,|\,\widehat{\Gamma_{i}}>0\right)\\
\\
\mathbb{P}\left(X+Y<a\,|\,X<a\right)\mathbb{P}\left(X<a\right) & =\int_{-\infty}^{a}\left(\int_{-\infty}^{+\infty}\varphi\left(y\right)\boldsymbol{1}_{x+y<a}dy\right)\varphi\left(x\right)dx\\
& =\frac{1}{2\pi\sqrt{\sigma_{X}^{2}\sigma_{Y}^{2}}}\int_{-\infty}^{a}\int_{-\infty}^{+\infty}e^{-\frac{x^{2}}{2\sigma_{X}^{2}}-\frac{y^{2}}{2\sigma_{Y}^{2}}}\boldsymbol{1}_{x+y<a}dydx\\
\mathbb{P}\left(X+Y<a\,|\,X<a\right)\\
\\
\\
\\
& =\int_{-\infty}^{a}\left(\int_{-\infty}^{a-x}\varphi\left(y\right)dy\right)\varphi\left(x\right)dx\\
& =\int_{-\infty}^{a}\Phi_{Y}\left(a-x\right)\varphi_{X}\left(x\right)dx\\
& =\frac{1}{2\sqrt{2\pi\sigma_{X}^{2}}}\int_{-\infty}^{a}\left(1+\text{erf}\left(\frac{a-x}{\sigma_{Y}\sqrt{2}}\right)\right)e^{-\frac{x^{2}}{2\sigma_{X}^{2}}}dx\\
& =\frac{1}{2\sqrt{2\pi\sigma_{X}^{2}}}\int_{-\infty}^{a}\left(1+\frac{2}{\sqrt{\pi}}\int_{0}^{\frac{a-x}{\sigma_{Y}\sqrt{2}}}e^{-t^{2}}dt\right)e^{-\frac{x^{2}}{2\sigma_{X}^{2}}}dx\\
& =\frac{\Phi_{X}\left(a\right)}{2\sqrt{2\pi\sigma_{X}^{2}}}+\frac{2}{\sqrt{\pi}}\int_{-\infty}^{a}e^{-\frac{x^{2}}{2\sigma_{X}^{2}}}\left(\int_{0}^{\frac{a-x}{\sigma_{Y}\sqrt{2}}}e^{-t^{2}}dt\right)dx\\
\\
\\
\end{align*}
$g_{i}-\gamma_{i}=\epsilon_{i}\tilde{\sim}\mathcal{N}\left(b,\sigma^{2}\right)$,
$\widehat{\gamma_{i}}-\epsilon_{i}\tilde{\sim}\mathcal{N}\left(b-\iota_{i},\sigma^{2}\right)$,
$b-\epsilon_{i}\tilde{\sim}\mathcal{N}\left(0,\sigma^{2}\right)$. TODO :
mettre ça dans un cadre bayésien (le 5/6 n'intervient pas par exemple!).
Pistes : $\widetilde{\mathbb{P}}\left(\Gamma>0\right)\overset{\text{hyp.}}{=}\widetilde{\mathbb{P}}\left(\widehat{\Gamma}>0\right)\implies\widetilde{\mathbb{P}}\left(\widehat{\Gamma}>0\,|\,\Gamma>0\right)=\widetilde{\mathbb{P}}\left(\Gamma>0\,|\,\widehat{\Gamma}>0\right)=\frac{5}{6}$;
$\widetilde{\mathbb{P}}\left(\Gamma>0\right)$ peut être estimé/encadré
à partir de \emph{G }et $L^{\mathring{p}}$; $\widetilde{\mathbb{P}}\left(g_{i}^{F}>0\,|\,\widehat{\Gamma}>0\right)=\widetilde{\mathbb{P}}\left(\widehat{\gamma_{i}}+\iota_{i}-\left(\alpha+\eta_{i}\right)b>-\epsilon_{i}\,|\,\widehat{\gamma_{i}}>0\right)$...;
le répondant est certain quand il croit que $\text{Var}\left(\epsilon_{i}\right)$
est faible, mais alors après un feedback infirmant, il devrait soit
réviser son estimation fortement (car il se rend compte qu'un $\left|\epsilon_{i}\right|$
élevé est improbable): réviser \emph{b}, soit ça signifie qu'il croit
que notre estimation est fortement biaisée ($\widehat{\gamma_{i}}+\iota_{i}$,
avec $\left|\iota_{i}\right|$ élevé): réviser $\iota_{i}$ (cela
dénote une irrationalité ou un manque de confiance en nous, car la
révision devrait être vers $\iota_{i}=0$); on faisait l'hypothèse
que le répondant connaissait $\widehat{\gamma_{i}}$ mais on devrait
peut-être rajouter une erreur sur ce terme (ou interpréter $\iota_{i}$
ainsi).
\end{document}