/*
  True skill in Picat.

  Example inspired by Johannes Borgstrom, Andrew D. Gordon,
  Michael Greenberg, James Margetson, and Jurgen Van Gael:
  "Measure Transformer Semantics for Bayesian Machine Learning"
  https://www.microsoft.com/en-us/research/publication/measure-transformer-semantics-for-bayesian-machine-learning-2011/?from=http%3A%2F%2Fresearch.microsoft.com%2Fpubs%2F135344%2Fmsr-tr-2011-18.pdf

  In this setup the three persons a, b and c have some skill,
  and they have a performance (of some unidentified task/action).
  We only observe performance, not the skills.

  Here we observe that:
  - a performs better than both b and c
  - b performs better than c.

  This is a port of my Gamble model gamble_true_skill.rkt.

  Cf:
  - ppl_true_skill_simple.pi, but this current model is quite simpler
    than that model.

  This program was created by Hakan Kjellerstrand, hakank@gmail.com
  See also my Picat page: http://www.hakank.org/picat/
*/
import ppl_distributions,ppl_utils.

main => go.

/*
  Sample run (truncated):

  var : skills(1)
  Probabilities (truncated):
  110.356094944947088: 0.00059952038369305 (1 / 1668)
  109.993527643642452: 0.00059952038369305 (1 / 1668)
  109.641657089712837: 0.00059952038369305 (1 / 1668)
  109.224664900945072: 0.00059952038369305 (1 / 1668)
  .........
  93.084629954279521: 0.00059952038369305 (1 / 1668)
  92.722418466345346: 0.00059952038369305 (1 / 1668)
  92.649811284188402: 0.00059952038369305 (1 / 1668)
  91.814344865557558: 0.00059952038369305 (1 / 1668)
  mean = 101.633

  var : skills(2)
  Probabilities (truncated):
  109.381190329004596: 0.00059952038369305 (1 / 1668)
  109.079093397621847: 0.00059952038369305 (1 / 1668)
  108.922380070720692: 0.00059952038369305 (1 / 1668)
  108.783588875059095: 0.00059952038369305 (1 / 1668)
  .........
  92.626708449667831: 0.00059952038369305 (1 / 1668)
  92.60299547137663: 0.00059952038369305 (1 / 1668)
  92.578781025840669: 0.00059952038369305 (1 / 1668)
  89.365945066034357: 0.00059952038369305 (1 / 1668)
  mean = 100.015

  var : skills(3)
  Probabilities (truncated):
  106.791474649404591: 0.00059952038369305 (1 / 1668)
  106.299618876289259: 0.00059952038369305 (1 / 1668)
  106.043259638697933: 0.00059952038369305 (1 / 1668)
  105.990883269359031: 0.00059952038369305 (1 / 1668)
  .........
  89.826945166157216: 0.00059952038369305 (1 / 1668)
  89.604280468223024: 0.00059952038369305 (1 / 1668)
  89.202816851513319: 0.00059952038369305 (1 / 1668)
  88.753559232947239: 0.00059952038369305 (1 / 1668)
  mean = 98.2731

  var : performance(1)
  Probabilities (truncated):
  118.918097564094154: 0.00059952038369305 (1 / 1668)
  116.980542653897558: 0.00059952038369305 (1 / 1668)
  116.669790286945485: 0.00059952038369305 (1 / 1668)
  116.597275205553785: 0.00059952038369305 (1 / 1668)
  .........
  94.591587425659952: 0.00059952038369305 (1 / 1668)
  94.497938522414927: 0.00059952038369305 (1 / 1668)
  94.20561363407603: 0.00059952038369305 (1 / 1668)
  93.311249208775578: 0.00059952038369305 (1 / 1668)
  mean = 104.113

  var : performance(2)
  Probabilities (truncated):
  111.450364215319553: 0.00059952038369305 (1 / 1668)
  110.778342365320626: 0.00059952038369305 (1 / 1668)
  109.853839073761691: 0.00059952038369305 (1 / 1668)
  109.766116879680965: 0.00059952038369305 (1 / 1668)
  .........
  89.274429594997031: 0.00059952038369305 (1 / 1668)
  88.226959817387652: 0.00059952038369305 (1 / 1668)
  87.943630537949133: 0.00059952038369305 (1 / 1668)
  87.467412370224707: 0.00059952038369305 (1 / 1668)
  mean = 99.9461

  var : performance(3)
  Probabilities (truncated):
  108.556598800640032: 0.00059952038369305 (1 / 1668)
  107.48309012714347: 0.00059952038369305 (1 / 1668)
  106.977050608345323: 0.00059952038369305 (1 / 1668)
  106.879205278283905: 0.00059952038369305 (1 / 1668)
  .........
  84.41816728577669: 0.00059952038369305 (1 / 1668)
  83.417570249296134: 0.00059952038369305 (1 / 1668)
  83.279453692764235: 0.00059952038369305 (1 / 1668)
  81.680254397514631: 0.00059952038369305 (1 / 1668)
  mean = 95.7115
*/
% Run the model for 10_000 samples and show the (truncated) posterior
% probabilities and means for the named variables.
go ?=>
  reset_store(),
  run_model(10_000,$model,[show_probs_rat_trunc,mean,
                           presentation=["skills(1)","skills(2)","skills(3)",
                                         "performance(1)","performance(2)","performance(3)"]
                          ]),
  % fail,
  nl.
go => true.

%
% The TrueSkill model:
% There are three people, a, b, and c.
% Each person has an unknown skill and a known performance,
% where the skill is reflected in the performance
% (with uncertainties).
%
model() =>
  A = 1,
  B = 2,
  C = 3,
  N = 3,

  % Each player has some fixed (latent) skill.
  Skills = normal_dist_n(100,sqrt(10),N),

  % Performance per player: this time's performance depends
  % on the skill but might vary.
  Perf = [normal_dist(Skills[P],sqrt(15)) : P in 1..N],

  % Now we see that a is better than b and c,
  % and b is better than c.
  observe(Perf[A] > Perf[B]),
  observe(Perf[A] > Perf[C]),
  observe(Perf[B] > Perf[C]),

  if observed_ok() then
    % Latent skill for each player
    add("skills(1)",Skills[1]),
    add("skills(2)",Skills[2]),
    add("skills(3)",Skills[3]),
    % Performance for each player
    add("performance(1)",Perf[1]),
    add("performance(2)",Perf[2]),
    add("performance(3)",Perf[3])
  end.