We present RobinHood, an offline contextual bandit algorithm designed to satisfy a broad family of fairness constraints. Our algorithm accepts multiple fairness definitions and allows users to construct their own unique fairness definitions for the problem at hand. We provide a theoretical analysis of RobinHood, which includes a proof that it will not return an unfair solution with probability greater than a user-specified threshold. We validate our algorithm on three applications: a tutoring system in which we conduct a user study and consider multiple unique fairness definitions; a loan approval setting (using the Statlog German credit data set) in which well-known fairness definitions are applied; and criminal recidivism (using data released by ProPublica). In each setting, our algorithm is able to produce fair policies that achieve performance competitive with other offline and online contextual bandit algorithms.
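A minimal sketch of how such a high-probability guarantee can be enforced, assuming a Seldonian-style safety test backed by a Hoeffding confidence bound: a candidate policy is returned only if a high-confidence upper bound on a fairness-violation measure is non-positive. The names (hoeffding_upper_bound, safety_test) and the encoding of fairness as g(policy) <= 0 are illustrative assumptions, not the paper's implementation.

import numpy as np

NO_SOLUTION_FOUND = None  # sentinel returned when fairness cannot be certified

def hoeffding_upper_bound(samples, delta, value_range=1.0):
    # (1 - delta)-confidence upper bound on the mean of i.i.d. samples
    # bounded within an interval of width value_range (Hoeffding's inequality).
    n = len(samples)
    return np.mean(samples) + value_range * np.sqrt(np.log(1.0 / delta) / (2.0 * n))

def safety_test(candidate_policy, g_samples, delta):
    # g_samples: held-out, unbiased estimates of a fairness-violation measure
    # g(candidate_policy), encoded so that g <= 0 means the constraint is met.
    # The candidate is returned only if g can be certified non-positive with
    # probability at least 1 - delta; otherwise no solution is returned.
    if hoeffding_upper_bound(g_samples, delta) <= 0.0:
        return candidate_policy
    return NO_SOLUTION_FOUND

Under this scheme, returning no solution is the designed failure mode when the data cannot support the guarantee: the test bounds the probability of certifying an unfair policy, rather than optimizing average-case behavior.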
@inproceedings{Metevier19neurips,
  author = {Blossom Metevier and Stephen Giguere and Sarah Brockman and Ari Kobren and Yuriy Brun and Emma Brunskill and Philip S. Thomas},
  title = {\href{http://people.cs.umass.edu/brun/pubs/pubs/Metevier19neurips.pdf}{Offline Contextual Bandits with High Probability Fairness Guarantees}},
  booktitle = {Proceedings of the 33rd Annual Conference on Neural Information Processing Systems (NeurIPS), Advances in Neural Information Processing Systems 32},
  venue = {NeurIPS},
  address = {Vancouver, BC, Canada},
  month = {December},
  date = {9--14},
  year = {2019},
  pages = {14893--14904},
  accept = {$\frac{1,428}{6,743} \approx 21\%$},
  url = {http://papers.neurips.cc/paper/9630-offline-contextual-bandits-with-high-probability-fairness-guarantees},
  abstract = {We present RobinHood, an offline contextual bandit algorithm designed to satisfy a broad family of fairness constraints. Our algorithm accepts multiple fairness definitions and allows users to construct their own unique fairness definitions for the problem at hand. We provide a theoretical analysis of RobinHood, which includes a proof that it will not return an unfair solution with probability greater than a user-specified threshold. We validate our algorithm on three applications: a tutoring system in which we conduct a user study and consider multiple unique fairness definitions; a loan approval setting (using the Statlog German credit data set) in which well-known fairness definitions are applied; and criminal recidivism (using data released by ProPublica). In each setting, our algorithm is able to produce fair policies that achieve performance competitive with other offline and online contextual bandit algorithms.},
  fundedBy = {NSF CCF-1453474, NSF IIS-1753968, NSF CCF-1763423},
}