

title(A):-
A=[
'% Modeling Signaling Games by Prolog : '
,'% Bayesian Perfect Equilibria and Dempster-Shafer Equilibria'
,'% file: dse0.pl (for SWI-Prolog 5.0.9)'
,'% created: 20--24 Jun 2004.(a modified version of gbayes01 augmented with beleq03)'
,'% previous version: gbayes01.pl(26 Dec 2003), beleq03.pl(18 Mar 2004)'
,'% modified: 13,14 Jul 2004. (position of dynamic for SWI-Prolog 5.2.13. )'
,'% ---------------------------'
,'% main: consistency_test/0.'
,'% display_consistency_results(yes).'
,'% display_consistency_results(all).'
,'% figure.'
].

reference(A):-
A=[
'% references:',
'% [1] R. Gibbons (1992). Game Theory for Applied Economists. Princeton University Press.',
'% [2] S. Muto (2001). Gemu Riron Nyuumon [Introduction to Game Theory]. Nikkei Bunko. (Japanese)',
'% [3] J. Eichberger and D. Kelsey (1999). Education signalling and uncertainty. In M.J. Machina and B. Munier (eds.), Beliefs, Interactions and Preferences in Decision Making, pp. 135-157. Kluwer Academic.',
'% [4] M.J. Ryan (2002). Violations of belief persistence in Dempster-Shafer equilibrium. Games and Economic Behavior 39:167-174.'
].

figure:-
   Figure=[
     '%           1,1               0,1  ',
     '%            |U       1        |U         ',
     '%            |    L  [t1]   R  |          ',
     '%  1,0 +-----+--------+--------+------+ 0,4 ',
     '%        D   :        |        :   D      ',
     '%            :        |[0.5]   :          ',
     '%            2       [ ] N     2          ',
     '%            :        |[0.5]   :          ',
     '%            :   L    |   R    :          ',
     '%  0,1 +-----+--------+--------+------+ 1,1',
     '%        U   |        1        |   U       ',
     '%            |D     [t2]       |D          ',
     '%           0,4               1,0  ',
     '% Fig. A signaling game of Ryan(2002). ',
     '% Nature (N) draws the type, t1 or t2, with probability 0.5 each.',
     '% The payoff pairs shown are those of the sender and the receiver (player_1 and player_2 in this program; Players 2 and 3 in the numbering of Ryan (2002), where Player 1 is Nature).'
   ],
   forall(member(L,Figure),(nl,write(L))).



%=====================================
% Signaling games
%=====================================
% descriptive modeling.

player(player_1).
player(player_2).

possible_type_of(player_1, type_1).
possible_type_of(player_1, type_2).

% the possible type should not affect the strategy space.
%pure_strategy_of(player_1:T,left):-possible_type_of(player_1, T).
%pure_strategy_of(player_1:T,right):-possible_type_of(player_1, T).
pure_strategy_of(player_1,left).
pure_strategy_of(player_1,right).
pure_strategy_of(player_2,up).
pure_strategy_of(player_2,down).

% Player 2's mental model, which hypothesizes player_1's decision rule.

:- dynamic strategy_of/4.

strategy_of(player_1, T,right,Q):-
   strategy_of(player_1, T,left,P),
   Q is 1 - P.

strategy_of(player_1, type_1,left,1).
strategy_of(player_1, type_2,left,1).


% randomized strategies
%-----------------------------
possible_strategy_of(player_1, type_1,left,P):-
%   member(P,[0,1]).
%alternatives:
%   member(P,[0,0.5,1]).
   probability(0.1,P).
%   probabilities(2,[W,_]),P is W/100.

possible_strategy_of(player_1, type_2,left,P):-
%   member(P,[0,1]).
%   member(P,[0,0.5,1]).
   probability(0.5,P).

possible_strategy_of(player_2, down,P):-
   member(P,[0,1]).
%   member(P,[0,0.5,1]).
%   probability(0.5,P).


%=====================================
% beliefs and the updating rule
%=====================================

%-------------------------------------------
% (1) probabilistic (Bayesian) model
%-------------------------------------------
prior_probability_of(type_1,0.5).
%prior_probability_of(type_1,0.75).
%prior_probability_of(type_1,1/3).
%prior_probability_of(type_1,0.999999999999).
%prior_probability_of(type_1,1).

prior_probability_of(type_2,Q):-
   prior_probability_of(type_1,P),
   Q is 1 - P.

knowledge(upper_node->player_2 is type_1).
knowledge(lower_node->player_2 is type_2).

bayes_updating_of(player_2, 
  P->(Q=B/B0),
  at(Node:T),
  observing(LeftOrRight),
  reason(prob(B,for(T)),prob(BN,if_not))
 ):-
   strategy_of(player_1, T,LeftOrRight,S),
   knowledge(Node->player_2 is T),
   collect_probs_of_consistent_states(LeftOrRight,_,B0),
   prior_probability_of(T,P),
   bayes_rule(B0,P,S,B,Q),
   BN is B0 - B.


bayes_rule(B0,P,S,B,Q):-
   B0 > 0,
   B is P * S,
   Q is B / B0.

bayes_rule(0,_,_,0.5,0.5). % posterior of measure-0 event.

collect_probs_of_consistent_states(LeftOrRight,BL,B0):-
   findall(B1,
     (
      strategy_of(player_1, T1,LeftOrRight,P1),
      prior_probability_of(T1,S1),
      B1 is P1 * S1
     ),
   BL),
   sum(BL,B0).
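
% A hand-worked instance of the rule above (illustrative only, not program output):
% the posterior for type T after observing a signal is
%   Q = P(T) * P(signal|T) / sum over T' of P(T') * P(signal|T').
% For example, with priors 0.5/0.5 and hypothesized strategies P(left|type_1)=1 and
% P(left|type_2)=0, observing left gives B0 = 0.5*1 + 0.5*0 = 0.5 and Q = 0.5*1/0.5 = 1.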


/*
% for the entry-deterrence model (#4)

?- bayes_updating_of(player_2, Update,Where,Data,Why).

Update = 0.5->0=0/0.5
Where = at(upper_node:type_1)
Data = observing(left)
Why = reason(prob(0, for(type_1)), prob(0.5, if_not)) ;

Update = 0.5->1=0.5/0.5
Where = at(lower_node:type_2)
Data = observing(left)
Why = reason(prob(0.5, for(type_2)), prob(0, if_not)) ;

Update = 0.5->1=0.5/0.5
Where = at(upper_node:type_1)
Data = observing(right)
Why = reason(prob(0.5, for(type_1)), prob(0, if_not)) ;

Update = 0.5->0=0/0.5
Where = at(lower_node:type_2)
Data = observing(right)
Why = reason(prob(0, for(type_2)), prob(0.5, if_not)) ;

No
?- 
*/



%=====================================
% payoffs for pure strategy profile
%=====================================

payoff([_, _, _], nature, 0).

%/*
% #1
%--- a signaling game in Ryan(2002)
payoff([type_1, left, up], player_1, 1).
payoff([type_1, left, up], player_2, 1).
payoff([type_1, left, down], player_1, 1).
payoff([type_1, left, down], player_2, 0).
payoff([type_1, right, up], player_1, 0).
payoff([type_1, right, up], player_2, 1).
payoff([type_1, right, down], player_1, 0).
payoff([type_1, right, down], player_2, 4).
payoff([type_2, left, up], player_1, 0).
payoff([type_2, left, up], player_2, 1).
payoff([type_2, left, down], player_1, 0).
payoff([type_2, left, down], player_2, 4).
payoff([type_2, right, up], player_1, 1).
payoff([type_2, right, up], player_2, 1).
payoff([type_2, right, down], player_1, 1).
payoff([type_2, right, down], player_2, 0).
%*/



/*
% #2
%---a signaling game in Gibbons(1992), figure 4.4.2.
payoff([type_1, left, up], player_1, 3).
payoff([type_1, left, up], player_2, 2).
payoff([type_1, left, down], player_1, 2).
payoff([type_1, left, down], player_2, 0).
payoff([type_1, right, up], player_1, 1).
payoff([type_1, right, up], player_2, 0).
payoff([type_1, right, down], player_1, 0).
payoff([type_1, right, down], player_2, 1).
payoff([type_2, left, up], player_1, 1).
payoff([type_2, left, up], player_2, 0).
payoff([type_2, left, down], player_1, 1).
payoff([type_2, left, down], player_2, 1).
payoff([type_2, right, up], player_1, 2).
payoff([type_2, right, up], player_2, 1).
payoff([type_2, right, down], player_1, 0).
payoff([type_2, right, down], player_2, 0).
*/

/*
% #3
%---a signaling game in Gibbons(1992), figure 4.2.2.
payoff([type_1, left, up], player_1, 1).
payoff([type_1, left, up], player_2, 3).
payoff([type_1, left, down], player_1, 4).
payoff([type_1, left, down], player_2, 0).
payoff([type_1, right, up], player_1, 2).
payoff([type_1, right, up], player_2, 1).
payoff([type_1, right, down], player_1, 1).
payoff([type_1, right, down], player_2, 0).
payoff([type_2, left, up], player_1, 2).
payoff([type_2, left, up], player_2, 4).
payoff([type_2, left, down], player_1, 0).
payoff([type_2, left, down], player_2, 1).
payoff([type_2, right, up], player_1, 1).
payoff([type_2, right, up], player_2, 0).
payoff([type_2, right, down], player_1, 1).
payoff([type_2, right, down], player_2, 2).
*/


/*
% #4
%--- an entry-deterrence game in Muto(2001).
payoff([type_1, right, up], player_1, 0).
payoff([type_1, right, up], player_2, 5).
payoff([type_1, right, down], player_1, 0).
payoff([type_1, right, down], player_2, 5).
payoff([type_1, left, up], player_1, 2).
payoff([type_1, left, up], player_2, 3).
payoff([type_1, left, down], player_1, -2).
payoff([type_1, left, down], player_2, 1).
payoff([type_2, right, up], player_1, 0).
payoff([type_2, right, up], player_2, 5).
payoff([type_2, right, down], player_1, 0).
payoff([type_2, right, down], player_2, 5).
payoff([type_2, left, up], player_1, 3).
payoff([type_2, left, up], player_2, 0).
payoff([type_2, left, down], player_1, 1).
payoff([type_2, left, down], player_2, 1).
*/


%================================
% (Standard) expected utilities
%================================

average([Q,Ud,Uc],U=Ueq):-
   W1 is integer(Q * 100),
   W2 is integer((1-Q) * 100),
   % to avoid numerical error.
   Ueq = (W1 * Ud + W2 * Uc) /100,
   U is Ueq. 
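
% A minimal usage sketch of average/2 (hand-derived, not program output):
% ?- average([0.5,4,0], R).
% binds R to 2 = (50*4+50*0)/100, i.e., the expectation 0.5*4 + 0.5*0 = 2,
% computed with integer percentile weights to avoid floating-point drift.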

% first order averages

expected_utility_0(player_2, if(T,LOR),down(Q),utility(U=Ueq)):-
   payoff([T, LOR, down], player_2, Ud),
   payoff([T, LOR, up], player_2, Uc),
   possible_strategy_of(player_2, down,Q),
   average([Q,Ud,Uc],U=Ueq).

expected_utility_0(player_1, (T,LOR),if(belief(type_1:P),down(Q)),utility(U=Ueq)):-
   rational_choice_of(player_2, if(LOR),belief(type_1:P),down(Q),utility(_)),
   payoff([T, LOR, down], player_1, Ud),
   payoff([T, LOR, up], player_1, Uc),
   average([Q,Ud,Uc],U=Ueq).

% second order averages

expected_utility_of(player_2, if(LOR),belief(type_1:P),down(Q),utility(U=Ueq)):-
   bayes_updating_of(player_2, _->(P=_),at(_:type_1),observing(LOR),_),
   expected_utility_0(player_2, if(type_1,LOR),down(Q),utility(_=Uw/100)),
   expected_utility_0(player_2, if(type_2,LOR),down(Q),utility(_=Ut/100)),
   average([P,Uw,Ut],_=Ueq/100),
   U is Ueq/10000. 

expected_utility_of(player_1, (T,left(P)),assess(ASSESS),utility(U=Ueq)):-
   expected_utility_0(player_1, (T,left),if(Be,De),utility(_=Ue/100)),
   expected_utility_0(player_1, (T,right),if(Bx,Dx),utility(_=Ux/100)),
   ASSESS=[if(left,Be,De),if(right,Bx,Dx)],
   possible_strategy_of(player_1, T,left,P),
   average([P,Ue,Ux],_=Ueq/100),
   U is Ueq/10000. 
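
% Note on the scaling above (hand-derived, not program output): each first-order average
% keeps a factor of 100 in its symbolic numerator, and the second-order average adds
% another factor of 100, hence the final division by 10000. For instance, with P=0.5,
% Uw = (50*4+50*0) = 200 and Ut = (50*1+50*1) = 100, the second-order numerator is
% 50*200+50*100 = 15000, so U = 15000/10000 = 1.5.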






%================================
% rational choices
%================================

rational_choice_of(player_2, if(LOR),belief(type_1:P),down(Q),utility(U)):-
   member(LOR,[right, left]),
   EU=expected_utility_of(player_2, if(LOR),belief(type_1:P),down(Q),utility(U=_)),
   max(U,EU).

/*
% for the entry-deterrence model (#4)

?- rational_choice_of(player_2, E,B,D,U).

E = if(right)
B = belief(type_1, 1)
D = down(0)
U = utility(5) ;

E = if(right)
B = belief(type_1, 1)
D = down(1)
U = utility(5) ;

E = if(left)
B = belief(type_1, 0)
D = down(1)
U = utility(1) ;

No
?- 
*/


% modified: 20 Jun 2004. bugfix.

rational_choice_of(player_1, (T,left(P)),assess(ASSESS),utility(U=Ueq)):-
   possible_type_of(player_1, T),
   EU=expected_utility_of(player_1, (T,left(P)),assess(ASSESS),utility(U=Ueq)),
   setof((T,P,U=Ueq),EU,Bag),
   max(U,member((T,P,U=Ueq),Bag)).

/*
% for the entry-deterrence model (#4)

?- rational_choice_of(player_1, A,B,C).

A = type_1, left(0)
B = assess([if(left, belief(type_1:0), down(1)), if(right, belief(type_1:1), down(0))])
C = utility(0= (0* (100* -2+0*2)+100* (0*0+100*0))/10000) ;

A = type_1, left(0)
B = assess([if(left, belief(type_1:0), down(1)), if(right, belief(type_1:1), down(1))])
C = utility(0= (0* (100* -2+0*2)+100* (100*0+0*0))/10000) ;

A = type_2, left(1)
B = assess([if(left, belief(type_1:0), down(1)), if(right, belief(type_1:1), down(0))])
C = utility(1= (100* (100*1+0*3)+0* (0*0+100*0))/10000) ;

A = type_2, left(1)
B = assess([if(left, belief(type_1:0), down(1)), if(right, belief(type_1:1), down(1))])
C = utility(1= (100* (100*1+0*3)+0* (100*0+0*0))/10000) ;

No
?- 
*/


%==========================
% consistency test
%==========================

consistent_if(P,P,yes):-!.
consistent_if(_,_,no).

:- dynamic consistency/4.

% modified: 20 Jun 2004. bugfix.

consistency_test(type_1:P1,type_2:P2):-
   strategy_of(player_1, T,left,P0),
   rational_choice_of(player_1, (T,left(P)),assess(ASSESS),utility(_)),
   (
    member((ON,P,X),[(if(right,B,D),0,a),(if(left,B,D),1,b)])
    ;
    (P >0, P <1, X=c)
   ),
   member(ON,ASSESS),
   subtract(ASSESS,[ON],[OFF]),
   consistent_if(P,P0,Y),
   CON = consistency(Y,(type_1:P1,type_2:P2),(T,left(hypo:P0,best:P)),(ON,OFF)),
   (clause(CON,true)->true;assert(CON)),
   fail.

consistency_test(_,_).


%:- dynamic strategy_of/4.

consistency_test:-
   abolish(consistency/4),
   scan_strategies_of_player_1(type_1:P1,type_2:P2),
   update_strategies_of_player_1(type_1:P1,type_2:P2),
   consistency_test(type_1:P1,type_2:P2),
   fail.
consistency_test:-
   display_models,
   nl,display_half_line,
   write('results'),
   display_half_line,
   display_consistency_results(yes).

display_models:-
   nl,display_half_line,
   write('model'),
   display_half_line,
   P=prior_probability_of(T,Q),
   findall((T,Q),P,PRIORs),
   nl,write(priors:PRIORs),
   S1=possible_strategy_of(player_1, T,A,R),
   S2=possible_strategy_of(player_2, A,R),
   forall(setof(R,S1,SR1),(nl,write(strategies(1,T,A):SR1))),
   forall(setof(R,S2,SR2),(nl,write(strategies(2,A):SR2))).
   
display_half_line:-
   write('--------------------').

scan_strategies_of_player_1(type_1:P1,type_2:P2):-
   possible_strategy_of(player_1, type_1,left,P1),
   possible_strategy_of(player_1, type_2,left,P2).

update_strategies_of_player_1(type_1:P1,type_2:P2):-
   retractall(strategy_of(player_1, type_1,left,_)),
   retractall(strategy_of(player_1, type_2,left,_)),
   assert(strategy_of(player_1, type_1,left,P1)),
   assert(strategy_of(player_1, type_2,left,P2)),
   !.

display_strategies_of_player_1(type_1:P1,type_2:P2):-
   S1=strategy_of(player_1, type_1,left,P1),
   S2=strategy_of(player_1, type_2,left,P2),
   nl,write(S1),nl,write(S2).

% modified: 20 Jun 2004. bugfix.

display_consistency_results(all):-
   scan_strategies_of_player_1(type_1:P1,type_2:P2),%nl,write('Prob(left)->'),
   display_strategies_of_player_1(type_1:P1,type_2:P2),
   consistency(Y,(type_1:P1,type_2:P2),(T,left(hypo:P0,best:P)),(IN,OUT)),
   IN=if(AI,BI,DI),
   OUT=if(AN,BN,DN),
   nl,
   tab(1),write((T,'best reply':left(P),assumed:P0,consistency:Y)),
   nl,
   tab(1),write('strategies of opponent:'),
   nl,
   tab(2),write(AI->DI:BI),
   nl,
   tab(2),write(AN->DN:BN),
   fail.

display_consistency_results(yes):-
   scan_strategies_of_player_1(type_1:P1,type_2:P2),
   findall(T,consistency(yes,(type_1:P1,type_2:P2),(T,_),_),Bag),
   sort(Bag,[type_1,type_2]),
   display_strategies_of_player_1(type_1:P1,type_2:P2),
   consistency(yes,(type_1:P1,type_2:P2),(T,left(hypo:P0,best:P)),(ON,OFF)),
   ON=if(AI,BI,DI),
   OFF=if(AN,BN,DN),
   nl,
   tab(1),write((T,'best reply':left(P),assumed:P0,consistency:yes)),
   nl,
   tab(2),write('opponent strategies:'),
   nl,
   tab(3),write((AI->DI:BI)),
   nl,
   tab(3),write((AN->DN:BN)),
   fail.

display_consistency_results(_).


test_a:-
  member(X,[a,b,c]),
  test(X,Y),
  write(Y),
  fail.
test(a,1).
test(b,2).
test(c,3).

:- title(A),forall(member(B,A),(nl,write(B))).
%:-figure.




%********************************************************************
% Modeling Dempster-Shafer Equilibrium of Signaling Games
%********************************************************************
% reference: Ryan(2002).

%================================
% Choquet expected utilities
%================================

choquet_expected_utility_of(player_2, if(LOR),belief(type_1:P),down(Q),utility(U=Ueq)):-
   updating_beliefs_of_player_2(conditional_on(player_2s_act), LOR),
   choquet_expected_utility_0(player_2, if(type_1,LOR),down(Q),utility(_=Uw/100)),
   choquet_expected_utility_0(player_2, if(type_2,LOR),down(Q),utility(_=Ut/100)),
   choquet_integral([P,Uw,Ut],_=Ueq/100),
   U is Ueq/10000. 

choquet_expected_utility_of(player_1, (T,left(P)),assess(ASSESS),utility(U=Ueq)):-
   choquet_expected_utility_0(player_1, (T,left),if(Be,De),utility(_=Ue/100)),
   choquet_expected_utility_0(player_1, (T,right),if(Bx,Dx),utility(_=Ux/100)),
   ASSESS=[if(left,Be,De),if(right,Bx,Dx)],
   possible_strategy_of(player_1, T,left,P),
   choquet_integral([P,Ue,Ux],_=Ueq/100),
   U is Ueq/10000. 

/* ranking and cumulating events */

% practice (Japanese: renshu)
ranked_states(player_1:T,act(A),states(SL),payoffs(PL)):-
   pure_strategy_of(player_1,A),
   possible_type_of(player_1,T),
   setof((U,Y),U^Y^payoff([T,A,Y],player_1,U),R),
   reverse(R,R1),
   findall([S],member((U,S),R1),SL),
   findall(U,member((U,S),R1),PL).

ranked_states(player_2,act(A),states(SL),payoffs(PL)):-
   pure_strategy_of(player_2,A),
   setof((U,[T,X]),U^T^X^payoff([T,X,A],player_2,U),R),
   reverse(R,R1),
   findall(S,member((U,S),R1),SL),
   findall(U,member((U,S),R1),PL).

% ranking of events

leveled_event(player_1:T,act(A),event(E),payoff(U),rank(K/N)):-
   possible_type_of(player_1, T),
   pure_strategy_of(player_1, A),
   setof(U,X^payoff([T,A,X],player_1,U),R),
   length(R,N),
   reverse(R,R1),
   nth1(K,R1,U),
   findall([X],payoff([T,A,X],player_1,U),E0),
   sort(E0,E).

leveled_event(player_2,act(A),event(E),payoff(U),rank(K/N)):-
   pure_strategy_of(player_2,A),
   setof(U,T^X^payoff([T,X,A],player_2,U),R),
   length(R,N),
   reverse(R,R1),
   nth1(K,R1,U),
   findall([T,X],payoff([T,X,A],player_2,U),E0),
   sort(E0,E).
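
% A hand-derived instance for game #1, Ryan's game (illustrative, not program output):
% for player_2 and act down the distinct payoffs are 4 and 0, so the levels are
%   leveled_event(player_2,act(down),event([[type_1,right],[type_2,left]]),payoff(4),rank(1/2))
%   leveled_event(player_2,act(down),event([[type_1,left],[type_2,right]]),payoff(0),rank(2/2)),
% i.e., states are grouped by payoff level and ranked from the best payoff downwards.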

/* Choquet Integral Representation */
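% The recursion below accumulates, level by level, the discrete Choquet integral
%   CEU(f) = x_1*v(A_1) + sum over k>=2 of x_k*( v(A_1 u ... u A_k) - v(A_1 u ... u A_{k-1}) ),
% where x_1 >= x_2 >= ... are the ranked payoffs and A_k the corresponding leveled events.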

a_level_of_ceu(player_1:T,act(F),rank(1/N),[A,[A],[X],[B],[X*B]],V):-
   possible_type_of(player_1, T),
   pure_strategy_of(player_1, F),
   leveled_event(player_1:T,act(F),event(A),payoff(X),rank(1/N)),
   prior_belief_of((player_1,F),A,B),
   V is X * B.

a_level_of_ceu(player_1:T,act(F),rank(K/N),[Y,[A|O],[X|Z],[B|C],[V|U]],E):-
   possible_type_of(player_1, T),
   pure_strategy_of(player_1, F),
   a_level_of_ceu(player_1:T,act(F),rank(K0/N),[W,O,Z,C,U],E0),
   (K0=N->!,fail;K is K0 + 1),
   leveled_event(player_1:T,act(F),event(A),payoff(X),rank(K/N)),
   C=[B0|_],
   append(A,W,AW),
   sort(AW,Y),
   prior_belief_of((player_1,F),Y,B),
   V = X * (B-B0),
   E is E0 + V.

a_level_of_ceu(player_2:observed(D),act(F),rank(1/N),[A,[A],[X],[B],[X*B]],V):-
   pure_strategy_of(player_1, D),
   pure_strategy_of(player_2, F),
   leveled_event(player_2,act(F),event(A),payoff(X),rank(1/N)),
   updated_belief_of_player_2_conditional_on_player_1s_act(A/D,_,B),
   V is X * B.

a_level_of_ceu(player_2:observed(D),act(F),rank(K/N),[Y,[A|O],[X|Z],[B|C],[V|U]],E):-
%   pure_strategy_of(player_1, D),  % You should backtrack without the observed act being specified!
   pure_strategy_of(player_2, F),
   a_level_of_ceu(player_2:observed(D),act(F),rank(K0/N),[W,O,Z,C,U],E0),
   (K0=N->!,fail;K is K0 + 1),
   leveled_event(player_2,act(F),event(A),payoff(X),rank(K/N)),
   C=[B0|_],
   append(A,W,AW),
   sort(AW,Y),
   updated_belief_of(player_2,conditional_on:player_1s_act,Y/D,_,B),
   V = X * (B-B0),
   E is E0 + V.

ceu_of(player_1:Type,Act,[Levels,Events,Payoffs,Beliefs,Terms_of_CEU],CEU_Value):-
   possible_type_of(player_1, Type),
   pure_strategy_of(player_1, Act),
   a_level_of_ceu(
     player_1:Type,
     act(Act),
     rank(Levels/Levels),
     [_,Events,Payoffs,Beliefs,Terms_of_CEU],
     CEU_Value
   ).

ceu_of(player_2:observed(D),Act,[Levels,Events,Payoffs,Beliefs,Terms_of_CEU],CEU_Value):-
   pure_strategy_of(player_1, D),
   pure_strategy_of(player_2, Act),
   a_level_of_ceu(
     player_2:observed(D),
     act(Act),
     rank(Levels/Levels),
     [_,Events,Payoffs,Beliefs,Terms_of_CEU],
     CEU_Value
   ).
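
% A hand-worked check against the beliefs defined later in this file (illustrative,
% not program output): for player_2 observing left and playing down, the level-1 event
% {(type_1,right),(type_2,left)} has updated belief (0.5-0.25)/(1-0.25) = 1/3 and the
% cumulated level-2 event (all states) has belief 1, so
% CEU = 4*(1/3) + 0*(1 - 1/3) = 4/3, matching ceu(1.33333) in the DSE demo below.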


%================================
% Choquet rational choices 
%================================
%  which are uncertainty averse when the capacities are convex (UACEU)

choquet_rational_choice_of(player_2, observed(D),A,R,ceu(U)):-
   pure_strategy_of(player_1, D),
   max(U, ceu_of(player_2:observed(D),A,[_,_,_,_,R],U)).

choquet_rational_choice_of(player_1, type(T),A,R,ceu(U)):-
   possible_type_of(player_1, T),
   max(U,ceu_of(player_1:T,A,[_,_,_,_,R],U)).

set_of_choquet_rational_choices_of(player_2,observed(X), R,Ys):-
   pure_strategy_of(player_1,X),
   findall([U,Y],
    (
     choquet_rational_choice_of(player_2,observed(X),Y,_,ceu(U))
    ),
   R),
   findall(Y,member([U,Y],R),Ys).

set_of_choquet_rational_choices_of(player_1,type(T), R,Xs):-
   possible_type_of(player_1,T),
   findall([U,X],
    (
     choquet_rational_choice_of(player_1,type(T),X,_,ceu(U))
    ),
   R),
   findall(X,member([U,X],R),Xs).



%====================================================
% Dempster-Shafer Equilibrium for Signaling Games
%====================================================
% (Ryan,2002; Eichberger and Kelsey, 1999)
% An equilibrium under Dempster-Shafer rational beliefs:
% each player's (updated) belief must have a support that consists only of the
% opponent's Choquet-rational plays (i.e., the support is contained in the
% best-reply profile).

ds_equilibrium_plays([Y1,Y2,Y3,Y4],RB1,RB2s):-
   dse_condition(1,_,Y1),
   dse_condition(2,_,Y2),
   dse_condition(3,RB1,Y3),
   dse_condition(4,RB2s,Y4).


violate_for_dse(condition(1),[prior(player_1),X,M]):-
   pure_strategy_of(player_1,X),
   modularity(prior((player_1,X)),M),
   member(M,[submodular,nonlinear]).

violate_for_dse(condition(1),[prior(player_2),M]):-
   modularity(prior(player_2),M),
   member(M,[submodular,nonlinear]).

violate_for_dse(condition(2),[updated(player_2),X,Y]):-
   pure_strategy_of(player_1,X),
   event_of((player_2,X),Y),
   \+ updated_belief_of(player_2,conditional_on:player_1s_act,Y/X,_,_).


dse_condition(N,C,no):-
   member(N,[1,2]),
   violate_for_dse(condition(N),C),
   !.

dse_condition(N,[],yes):-
   member(N,[1,2]).

dse_condition(3,[in_belief_of_player_2,support:B1,br:R1,nbr_in_support:O1],Y):-
   support_of((player_2,prior), B1),
   findall([T,X,ceu(U)],
    (
     member([T,X],B1),
     choquet_rational_choice_of(player_1,type(T),X,_,ceu(U))
    ),
   R1),
   findall([T,X],(member([T,X],B1),\+ member([T,X,_],R1)),O1),
   (O1=[]->Y=yes;Y=no).

dse_condition((4,X),[in_belief_of_player_1,support:B2,br:R2,nbr_in_support:O2],Y):-
   support_of((player_1,X), B2),
   findall([Y,ceu(V)],
    (
     member([Y],B2),
     choquet_rational_choice_of(player_2,observed(X),Y,_,ceu(V))
    ),
   R2),
   findall([Y],(member([Y],B2),\+ member([Y,_],R2)),O2),
   (O2=[]->Y=yes;Y=no).

dse_condition(4,[in_belief_of_player_1,left:(HL,YL),right:(HR,YR)],Y):-
   dse_condition((4,left),[_|HL],YL),
   dse_condition((4,right),[_|HR],YR),
   (member(no,[YL,YR])->Y=no;Y=yes).

/*
?- ds_equilibrium_plays(Conditions,[A_______,B,C,D],[E________,F:G,H:I]).

Conditions = [yes, yes, yes, yes]
A_______ = in_belief_of_player_2
B = support:[[type_1, left], [type_2, right]]
C = br:[[type_1, left, ceu(1)], [type_2, right, ceu(1)]]
D = nbr_in_support:[]
E________ = in_belief_of_player_1
F = left
G = [support:[[down]], br:[[down, ceu(1.33333)]], nbr_in_support:[]], yes
H = right
I = [support:[[down]], br:[[down, ceu(1.33333)]], nbr_in_support:[]], yes ;

No
?- 

*/



%----------------------------------------------------------------
% Ryan's Refinement: Robust DSE
%----------------------------------------------------------------

dse_condition((5,X),[in_belief_of_player_2,S,M,S1],Y):-
   belief_persistence(S,X,M,S1,Y).

dse_condition(5,[in_belief_of_player_2,left:(BPL,YL),right:(BPR,YR)],Y):-
   dse_condition((5,left),[in_belief_of_player_2|BPL],YL),
   dse_condition((5,right),[in_belief_of_player_2|BPR],YR),
   (member(no,[YL,YR])->Y=no;Y=yes).
 
robust_ds_equilibrium_plays([Y1,Y2,Y3,Y4,Y5],C3,C4,C5):-
   ds_equilibrium_plays([Y1,Y2,Y3,Y4],C3,C4),
   dse_condition(5,C5,Y5).


%  belief persistence (minimal change in theory)
%---------------------------------------------------
% In the light of new information, a rational belief system should satisfy the
% minimal-change principle: unless it is empty, the support of the conditional belief
% should be the intersection of the observed event and the support of the prior belief.

belief_persistence(S,A,M,S1,Y):-
   support_of((player_2,prior),S),
   support_of((player_2,observed(A)),S1),
   findall([T,A],member([T,A],S),M),
   ((M=[];sort(M,S1))->Y=yes;Y=no).

/*
?- belief_persistence(S,A,M,S1,Y).

S = [[type_1, left], [type_2, right]]
A = right
M = [[type_2, right]]
S1 = [[type_1, right], [type_2, right]]
Y = no ;

S = [[type_1, left], [type_2, right]]
A = left
M = [[type_1, left]]
S1 = [[type_1, left], [type_2, left]]
Y = no ;

No
?- 
*/


%----------------------------------------------------------------
% update rule(2) : convex capacities and D-S update rule
%----------------------------------------------------------------
% cited & Modified from: beleq03.

%----------------------
% state space
%----------------------

states_of(player_2, 
 [
  [type_1, left],
  [type_1, right],
  [type_2, left],
  [type_2, right]
 ]
).

states_of((player_1, left), [[down],[up]]).
states_of((player_1, right), [[down],[up]]).

event_of(Player,E):-
   states_of(Player,A),
   subset_of(E,_,A).   


proper_subset_of(Y,S):-
   subset_of(Y,_,S),
   Y \= S.

complement_of(Event,Comp,Player):-
   states_of(Player,W),
   event_of(Player,Event),
   subtract(W,Event,Comp).


%--------------------------------------------------
% Support of Nonadditive Probability Measure
%--------------------------------------------------
% cited & modified from: beleq03 (Mar 2004)
%  definition of support by Dow and Werlang(1994).
%  also in Eichberger and Kelsey(1999).

%  supp(P) := A s.t. P(A*)=0 and, for every proper subset B of A, P(B*)>0,
%  where C* denotes the complement of event C.


event_whose_complement_is_measure_zero(S,player_1,A):-
   complement_of(S,C,(player_1,A)),
   prior_belief_of((player_1,A), C,0).

event_whose_complement_is_measure_zero(S,player_2,prior):-
   complement_of(S,C,player_2),
   prior_belief_of(player_2, C,0).

event_whose_complement_is_measure_zero(S,player_2,observed(D)):-
   complement_of(S,C,player_2),
   updated_belief_of_player_2_conditional_on_player_1s_act(C/D,_,0).

/*

?- event_whose_complement_is_measure_zero(S,P,A),nl,write(P-A-S),fail.

player_1-left-[[down]]
player_1-left-[[down], [up]]
player_1-right-[[down]]
player_1-right-[[down], [up]]
player_2-prior-[[type_1, left], [type_2, right]]
player_2-prior-[[type_1, left], [type_2, left], [type_2, right]]
player_2-prior-[[type_1, left], [type_1, right], [type_2, right]]
player_2-prior-[[type_1, left], [type_1, right], [type_2, left], [type_2, right]]
player_2-prior-[[type_1, left], [type_1, right], [type_2, left], [type_2, right]]
player_2-prior-[[type_1, left], [type_1, right], [type_2, left], [type_2, right]]
player_2-observed(right)-[[type_1, right], [type_2, right]]
player_2-observed(right)-[[type_1, right], [type_2, left], [type_2, right]]
player_2-observed(left)-[[type_1, left], [type_2, left]]
player_2-observed(left)-[[type_1, left], [type_2, left], [type_2, right]]
player_2-observed(right)-[[type_1, left], [type_1, right], [type_2, right]]
player_2-observed(left)-[[type_1, left], [type_1, right], [type_2, left]]
player_2-observed(left)-[[type_1, left], [type_1, right], [type_2, left], [type_2, right]]
player_2-observed(right)-[[type_1, left], [type_1, right], [type_2, left], [type_2, right]]

No

*/

support_of((Player,B), S):-
   event_whose_complement_is_measure_zero(S,Player,B),
   \+ (
     proper_subset_of(Y,S),
     event_whose_complement_is_measure_zero(Y,Player,B)
   ).


/*

?- support_of(A,B).

A = player_1, left
B = [[down]] ;

A = player_1, right
B = [[down]] ;

A = player_2, prior
B = [[type_1, left], [type_2, right]] ;

A = player_2, observed(right)
B = [[type_1, right], [type_2, right]] ;

A = player_2, observed(left)
B = [[type_1, left], [type_2, left]] ;

No
?-

*/

%----------------------
% Dempster-Shafer rule
%----------------------

%               v(union(A,-B)) - v(-B)
% v_ds(A|B) =  ------------------------
%               1 - v(-B)

updated_belief_of(player_2,conditional_on:event,E/D,Yq,Y):-
   prior_belief_of(player_2,E,_Y1),
   prior_belief_of(player_2,D,_Y2),
   complement_of(D,F,player_2),
   union(E,F,G0),
   sort(G0,G),
   prior_belief_of(player_2,F,Y3),
   \+ Y3 is 1,
   prior_belief_of(player_2,G,Y4),
   Yq = (Y4 - Y3)/(1-Y3),
   Y is Yq.

updated_belief_of(player_2,conditional_on:player_1s_act,E/Act,Yq,Y):-
   pure_strategy_of(player_1, Act),
   states_of(player_2,A),
   findall([Type,Act],member([Type,Act],A),B),
   updated_belief_of(player_2,conditional_on:event,E/B,Yq,Y).

updated_belief_of_player_2_conditional_on_player_1s_act(E/Act,Yq,Y):-
   updated_belief_of(player_2,conditional_on:player_1s_act,E/Act,Yq,Y).


/*

% for Ryan(2002)'s signaling game (#1)

?- updated_belief_of(player_2,conditional_on:event,[[type_2,left]]/[[type_1,left],[type_2,left]],B,C).

B = (0.5-0.25)/ (1-0.25)
C = 0.333333 

Yes
?- updated_belief_of(player_2,conditional_on:act_of_player_1,[T,X]/left,B,C),C>0.

T = [type_2, left]
X = [type_2, right]
B = (0.5-0.25)/ (1-0.25)
C = 0.333333 ;

T = [type_1, right]
X = [type_2, left]
B = (0.5-0.25)/ (1-0.25)
C = 0.333333 ;

T = [type_1, left]
X = [type_2, right]
B = (0.75-0.25)/ (1-0.25)
C = 0.666667 ;

T = [type_1, left]
X = [type_2, left]
B = (1-0.25)/ (1-0.25)
C = 1 ;

T = [type_1, left]
X = [type_1, right]
B = (0.75-0.25)/ (1-0.25)
C = 0.666667 ;

No
?- updated_belief_of(player_2,conditional_on:player_1s_act,[T,X]/left,B,C),C>0.

T = [type_2, left]
X = [type_2, right]
B = (0.5-0.25)/ (1-0.25)
C = 0.333333 ;

T = [type_1, right]
X = [type_2, left]
B = (0.5-0.25)/ (1-0.25)
C = 0.333333 ;

T = [type_1, left]
X = [type_2, right]
B = (0.75-0.25)/ (1-0.25)
C = 0.666667 ;

T = [type_1, left]
X = [type_2, left]
B = (1-0.25)/ (1-0.25)
C = 1 ;

T = [type_1, left]
X = [type_1, right]
B = (0.75-0.25)/ (1-0.25)
C = 0.666667 ;

No
?- 

*/



%--------------------------------------------
% modeling prior beliefs by 
% convex capacity (belief function).
%--------------------------------------------
% See Ryan(2002).

prior_belief_of((player_1,A), E,Q):-
   prior_belief_0((player_1,A), E,Q).

% It is assumed that Player 1's beliefs are the same regardless of his own choice.

/*
?- findall(W,
    setof(A-Q,(member(A,[left,right]),prior_belief_of((player_1,A), _,Q)),W),
    Z).

W = _G181
A = _G157
Q = _G158
Z = [[left-0, right-0], [left-1, right-1], [left-0, right-0], [left-1, right-1]] 

Yes
?- 
*/


% Ryan(2002)'s capacity for Player 2's prior belief.

prior_belief_of(player_2, E,Q):-
   prior_belief_1(player_2, E:P),
   findall(X,
    (
     prior_probability_of(Type,P0),
     augmented_capacity(Type,E,W),
     X = W * P0
    ),
   Y),  %nl,write(Y),
   sum(Y,Z),
   Q is (P+Z)/2.
   

augmented_capacity(Type,Event,P):-
   (var(Event)->event_of(player_2,Event);true),
   possible_type_of(player_1, Type),
   states_of(player_2,A),
   setof([Type|X],member([Type|X],A),TX),
   (subset(TX,Event)->P=1;P=0).

%augmented_capacity(_,_,0).
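
% Hand-worked values of this capacity for game #1 (illustrative, not program output):
%   v({(type_1,left)})                = (0.5 + 0)/2       = 0.25,
%   v({(type_1,left),(type_1,right)}) = (0.5 + 0.5)/2     = 0.5,
%   v(whole state space)              = (1 + 0.5 + 0.5)/2 = 1.
% That is, the capacity averages the additive expansion of prior_belief_0 with the
% type-measurable capacity weighted by the prior type probabilities.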


%----------------------
% prior belief of Player 2:
% convex capacity over Player 1's types and choices.
%----------------------
prior_belief_0(player_2,[],0).
prior_belief_0(player_2, [[type_2, right]],0.5).
%prior_belief_0(player_2, [[type_2, left]],0).
%prior_belief_0(player_2, [[type_2, left], [type_2, right]],0).
%prior_belief_0(player_2, [[type_1, right]],0).
%prior_belief_0(player_2, [[type_1, right], [type_2, right]],0).
%prior_belief_0(player_2, [[type_1, right], [type_2, left]],0).
%prior_belief_0(player_2, [[type_1, right], [type_2, left], [type_2, right]],0).
prior_belief_0(player_2, [[type_1, left]],0.5).
%prior_belief_0(player_2, [[type_1, left], [type_2, right]],0).
%prior_belief_0(player_2, [[type_1, left], [type_2, left]],0).
%prior_belief_0(player_2, [[type_1, left], [type_2, left], [type_2, right]],0).
%prior_belief_0(player_2, [[type_1, left], [type_1, right]],0).
%prior_belief_0(player_2, [[type_1, left], [type_1, right], [type_2, right]],0).
%prior_belief_0(player_2, [[type_1, left], [type_1, right], [type_2, left]],0).
prior_belief_0(player_2, [[type_1, left], [type_1, right], [type_2, left], [type_2, right]],1).

%----------------------
% prior belief of Player 1:
% convex capacity over Player 2's choices conditional on the own possible choice.
%----------------------

prior_belief_0((player_1, left), [],0).
prior_belief_0((player_1, left), [[down]],1).
prior_belief_0((player_1, left), [[up]],0).
prior_belief_0((player_1, left), [[down],[up]],1).

prior_belief_0((player_1, right), [],0).
prior_belief_0((player_1, right), [[down]],1).
prior_belief_0((player_1, right), [[up]],0).
prior_belief_0((player_1, right), [[down],[up]],1).

   
% default assignment
% (1) using the additive expansion.
%----------------------

prior_belief_1(Player, Event:P):-
   event_of(Player,Event),
   addtition_of_prior_beliefs_0(Player, Event:P).

addtition_of_prior_beliefs_0(Player, Event:P):-
   findall(Q,
    (
     member(S,Event),
     prior_belief_0(Player, [S],Q)
    ),
   R),
   sum(R,P).

% (2) using the lower probability and the monotonicity.
% not used here!
%----------------------

prior_belief_1b(Player, Event:P):-
   event_of(Player,Event),
   set_prior_belief_0_if_exists(Player, Event:P).

set_prior_belief_0_if_exists(Player, Event:P):-
   prior_belief_0(Player, Event:P),
   !.

set_prior_belief_0_if_exists(Player, Event:P):-
   use_lower_monotone_rule(Player, Event:P1),
   (var(P)->P=P1;P is P1).

use_lower_monotone_rule(Player, Event:P):-
   max(P,
     (
      prior_belief_0(Player, F:P),
      subset(F,Event)
     )
   ).



%---------------------------------------------------
% basic probability assignment (b.p.a.) via Möbius inversion
%---------------------------------------------------
%  mass(B) = sum(for(subset(A,B)), (-1)^|B-A| * v(A)).
% cited & modified from: beleq03

mass(E,G,Y,(BEL,Player)):-
   (var(BEL)->BEL=prior_belief_of;true),
   Goal=..[BEL,Player,F,B],
   event_of(Player,E),
   E \= [],
   findall(A,
     (
      subset_of(F,_,E),
      Goal,
      movius(F,E,K),
      A = K * B
     ),
   G),
   sum(G,Y).

movius(X,Y,Z):-
   subtract(Y,X,W),
   length(W,M),
   Z = (-1)^M.
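
% A hand-worked instance of the inversion (illustrative, not program output):
%   mass({(type_1,left),(type_1,right)})
%     = v({(t1,l),(t1,r)}) - v({(t1,l)}) - v({(t1,r)}) + v({})
%     = 0.5 - 0.25 - 0 + 0 = 0.25,
% which agrees with the query log below.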



/*

?- mass(A,_,C,(prior_belief_of,player_2)),C>0.

A = [[type_2, right]]
C = 0.25 ;

A = [[type_2, left], [type_2, right]]
C = 0.25 ;

A = [[type_1, left]]
C = 0.25 ;

A = [[type_1, left], [type_1, right]]
C = 0.25 ;

No
?-

*/



%---------------------------------------------------
%  test of modularity for pairwise events 
%---------------------------------------------------
% cited from: beleq03

modularity(prior(P),[E,F],[G,H],REL,Z):-
   prior_belief_of(P,E,Y1),
   prior_belief_of(P,F,Y2),
   union(E,F,G),
   intersection(E,F,H),
   prior_belief_of(P,G,Y3),
   prior_belief_of(P,H,Y4),
   case_of_pairwise_modularity([Y1,Y2,Y3,Y4],REL,Z).

modularity(updated(player_2,D),[E,F],[G,H],REL,Z):-
   updated_belief_of_player_2_conditional_on_player_1s_act(E/D,_,Y1),
   updated_belief_of_player_2_conditional_on_player_1s_act(F/D,_,Y2),
   union(E,F,G),
   intersection(E,F,H),
   updated_belief_of_player_2_conditional_on_player_1s_act(G/D,_,Y3),
   updated_belief_of_player_2_conditional_on_player_1s_act(H/D,_,Y4),
   case_of_pairwise_modularity([Y1,Y2,Y3,Y4],REL,Z).

case_of_pairwise_modularity([Y1,Y2,Y3,Y4], REL, Z):-
   X1 is Y1 + Y2,
   X2 is Y3 + Y4,
   case_of_pairwise_modularity1(X1,X2, REL0, Z),
   REL0 =.. [OP, X1, X2],
   REL =.. [OP, Y1 + Y2, Y3 + Y4].

case_of_pairwise_modularity1(X1,X2, REL, modular):-
   num_eq(X1,X2),!, REL=(X1 = X2).
case_of_pairwise_modularity1(X1,X2, REL, supermodular):-
   \+ num_eq(X1,X2), X1 < X2,!, REL=(X1 < X2).
case_of_pairwise_modularity1(X1,X2, REL, submodular):-
   \+ num_eq(X1,X2), X1 > X2,!, REL=(X1 > X2).

num_eq(X,Y):-
   Z is (X - Y)^2,
   Z < 10^(-10).
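
% A hand-worked pairwise check for player_2's prior capacity (illustrative, not program output):
% with E = {(type_1,left)} and F = {(type_1,right)},
%   v(E) + v(F) = 0.25 + 0 = 0.25  <  v(E u F) + v(E n F) = 0.5 + 0 = 0.5,
% a strictly supermodular pair; the total test below accordingly classifies
% prior(player_2) as supermodular.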

%  total test of modularity
%---------------------------------

modularity(P,Z):-
   member(P,
    [
     prior((player_1,left)),
     prior((player_1,right)),
     prior(player_2),
     updated(player_2,left),
     updated(player_2,right)
    ]
   ),
   findall(Y,
    (
     modularity(P,_,_,_,Y)
    ),
   W),
   sort(W,W1),
   case_of_modularity(W1,Z).

case_of_modularity([modular],modular):-!.
case_of_modularity(G,supermodular):- \+ member(submodular,G),!.
case_of_modularity(G,submodular):- \+ member(supermodular,G),!.
case_of_modularity(_G,nonlinear).


/*

?- modularity(A,B).

A = prior((player_1, left))
B = modular ;

A = prior((player_1, right))
B = modular ;

A = prior(player_2)
B = supermodular ;

A = updated(player_2, left)
B = modular ;

A = updated(player_2, right)
B = modular ;

No
?- 

*/







%=================
% common programs
%=================

% sum
% -----------------------------------------------------------  %
sum([],0).
sum([X|Members],Sum):-
   sum(Members,Sum1),
   Sum is Sum1 + X.



% maximal solution for a given goal clause: a naive solver
%---------------------------------------------------------

max(X,Goal):-
  % X: the objective variable,
  % Goal: the objective function and constraints,
  setof((X,Goal),Goal,Z),
  member((X,Goal),Z),
  \+ (
    member((Y,_),Z),
    Y > X
  ).
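
% A minimal usage sketch of max/2 (hand-derived, not program output):
% ?- max(X, member(X,[2,5,3])).
% X = 5.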


% probability 
% -----------------------------------------------------------  %
probability(W,P):-
   N1 is integer(1/W) + 1,
   length(L,N1),nth1(K,L,_), K1 is K - 1, P is K1/(N1 - 1).
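
% A minimal usage sketch (hand-derived, not program output): probability(W,P) enumerates
% an evenly spaced grid of probabilities with step W, e.g.
% ?- findall(P, probability(0.5,P), Ps).
% Ps = [0, 0.5, 1].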


/*
% allocation
% -----------------------------------------------------------  %
allocation(N,A,[X|Y]):-
    allocation(N,A,A,[X|Y]).
allocation(0,_,0,[]).
allocation(N,A,B,[X|Y]):-
    integer(A),
    length([X|Y],N),
    allocation(_N1,A,B1,Y),
    K is A - B1 + 1,
    length(L,K),
    nth0(X,L,X),
    B is B1 + X.
%
% probability (percentile) by using allocation
% -----------------------------------------------------------  %
probabilities(0,[]).
probabilities(N,[X|Y]):-
    integer(N),
    length([X|Y],N),
    allocation(N,100,[X|Y]).

*/


% a sequence of binary choices over a list:
%--------------------------------------------------
list_projection([],[],[]).
list_projection([X|Y],[_A|B],C):-
   X = 0,
   list_projection(Y,B,C).
list_projection([X|Y],[A|B],[A|C]):-
   X = 1,
   list_projection(Y,B,C).
%
% subset_of/3 : subset-enumeration 
% -----------------------------------------------------------  %
subset_of(A,N,As):-
   length(As,L),
   length(D,L),
   list_projection(D,As,B),
   length(B,N),
   sort(B,A).
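
% A minimal usage sketch of subset_of/3 (hand-derived, not program output):
% ?- findall(S, subset_of(S,_,[a,b]), L).
% L = [[], [b], [a], [a, b]].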



%-------------
%    demos
%-------------

/*

% for the second example, figure 4.4.2, of Gibbons(1992). [(L,L),(L->u,R->d),p=0.5,q].
% pure strategies case.
% without condition 5 applied.

?- consistency_test.

strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:1, best:1)), on:if(left, belief(type_1:0.5), down(0)), off:[if(right, belief(type_1:0.5), down(0))]
consistency(yes), (type_1, left(hypo:1, best:1)), on:if(left, belief(type_1:0.5), down(0)), off:[if(right, belief(type_1:0.5), down(1))]
consistency(yes), (type_2, left(hypo:1, best:1)), on:if(left, belief(type_1:0.5), down(0)), off:[if(right, belief(type_1:0.5), down(1))]

Yes

% for the first example, figure 4.2.2 of Gibbons(1992). [(R,L),(L->u,R->d)],[(L,L),(->u)]
% pure strategies case.
% without condition 5 applied.

?- consistency_test.

strategy_of(player_1, type_1, left, 0)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:0, best:0)), on:if(right, belief(type_1:0), down(1)), off:[if(left, belief(type_1:0), down(0))]
consistency(yes), (type_2, left(hypo:1, best:1)), on:if(left, belief(type_1:0), down(0)), off:[if(right, belief(type_1:0), down(1))]
strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:1, best:1)), on:if(left, belief(type_1:0.5), down(0)), off:[if(right, belief(type_1:0.5), down(1))]
consistency(yes), (type_2, left(hypo:1, best:1)), on:if(left, belief(type_1:0.5), down(0)), off:[if(right, belief(type_1:0.5), down(1))]

Yes
?- 

*/

/*
%--------------------------------
% case of pure strategies 
%--------------------------------

?- consistency_test.

strategy_of(player_1, type_1, left, 0)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(1))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0), down(1))
strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:1, best:1)), if(left, belief(type_1:0.5), down(0))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0.5), down(0))

Yes
?- display_consistency_results(all).

strategy_of(player_1, type_1, left, 0)
strategy_of(player_1, type_2, left, 0)
consistency(no), (type_1, left(hypo:0, best:1)), if(left, belief(type_1:0.5), down(0))
consistency(no), (type_2, left(hypo:0, best:1)), if(left, belief(type_1:0.5), down(0))
strategy_of(player_1, type_1, left, 0)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(1))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0), down(1))
strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 0)
consistency(yes), (type_1, left(hypo:1, best:1)), if(left, belief(type_1:1), down(0))
consistency(no), (type_2, left(hypo:0, best:1)), if(left, belief(type_1:1), down(0))
strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:1, best:1)), if(left, belief(type_1:0.5), down(0))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0.5), down(0))

Yes
?-  

%----------------------------------------------------------------
% case where 0.1 x n probabilities and prior for type_1 is 0.75.
%----------------------------------------------------------------

?- prior_probability_of(A,B).

A = type_1
B = 0.75 ;

A = type_2
B = 0.25 ;

No
?- consistency_test.

strategy_of(player_1, type_1, left, 0)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.1))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.2))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.3))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.4))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.5))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.6))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.7))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.8))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.9))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(1))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0), down(1))
strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:1, best:1)), if(left, belief(type_1:0.75), down(0))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0.75), down(0))

Yes
?- 

%----------------------------------------------------------------
% case where 0.1 x n probabilities and prior for type_1 is 1/3.
%----------------------------------------------------------------

?- prior_probability_of(A,B).

A = type_1
B = 1/3 ;

A = type_2
B = 0.666667 ;

No
?- consistency_test.

strategy_of(player_1, type_1, left, 0)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.1))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.2))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.3))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.4))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.5))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.6))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.7))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.8))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(0.9))
consistency(yes), (type_1, left(hypo:0, best:0)), if(right, belief(type_1:1), down(1))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0), down(1))
strategy_of(player_1, type_1, left, 1)
strategy_of(player_1, type_2, left, 1)
consistency(yes), (type_1, left(hypo:1, best:1)), if(left, belief(type_1:0.333333), down(0.2))
consistency(yes), (type_2, left(hypo:1, best:1)), if(left, belief(type_1:0.333333), down(0.2))

Yes
?- 
*/



