@article{354,
  title    = {A Declarative Approach to Fairness in Relational Domains},
  journal  = {IEEE Data Engineering Bulletin},
  volume   = {42},
  year     = {2019},
  pages    = {36--48},
  abstract = {AI and machine learning tools are being used with increasing frequency for decision making in domains that affect people{\textquoteright}s lives such as employment, education, policing and financial qualifications. These uses raise concerns about biases of algorithmic discrimination and have motivated the development of fairness-aware machine learning. However, existing fairness approaches are based solely on attributes of individuals. In many cases, discrimination is much more complex, and taking into account the social, organizational, and other connections between individuals is important. We introduce new notions of fairness that are able to capture the relational structure in a domain. We use first-order logic to provide a flexible and expressive language for specifying complex relational patterns of discrimination. Furthermore, we extend an existing statistical relational learning framework, probabilistic soft logic (PSL), to incorporate our definition of relational fairness. We refer to this fairness-aware framework FairPSL. FairPSL makes use of the logical definitions of fairness but also supports a probabilistic interpretation. In particular, we show how to perform maximum a posteriori (MAP) inference by exploiting probabilistic dependencies within the domain while avoiding violations of fairness guarantees. Preliminary empirical evaluation shows that we are able to make both accurate and fair decisions.},
  author   = {Farnadi, Golnoosh and Babaki, Behrouz and Getoor, Lise}
}

@inproceedings{341,
  title         = {Lifted Hinge-Loss {Markov} Random Fields},
  booktitle     = {AAAI Conference on Artificial Intelligence (AAAI)},
  year          = {2019},
  abstract      = {Statistical relational learning models are powerful tools that combine ideas from first-order logic with probabilistic graphical models to represent complex dependencies. Despite their success in encoding large problems with a compact set of weighted rules, performing inference over these models is often challenging. In this paper, we show how to effectively combine two powerful ideas for scaling inference for large graphical models. The first idea, lifted inference, is a well-studied approach to speeding up inference in graphical models by exploiting symmetries in the underlying problem. The second idea is to frame Maximum a posteriori (MAP) inference as a convex optimization problem and use alternating direction method of multipliers (ADMM) to solve the problem in parallel. A well-studied relaxation to the combinatorial optimization problem defined for logical Markov random fields gives rise to a hinge-loss Markov random field (HLMRF) for which MAP inference is a convex optimization problem. We show how the formalism introduced for coloring weighted bipartite graphs using a color refinement algorithm can be integrated with the ADMM optimization technique to take advantage of the sparse dependency structures of HLMRFs. Our proposed approach, lifted hinge-loss Markov random fields (LHL-MRFs), preserves the structure of the original problem after lifting and solves lifted inference as distributed convex optimization with ADMM. In our empirical evaluation on real-world problems, we observe up to a three times speed up in inference over HL-MRFs.},
  internal-note = {original month field was "11/2018", which is not a valid month and conflicts with year 2019 (AAAI 2019 was held Jan--Feb 2019); removed pending confirmation},
  author        = {Srinivasan, Sriram and Babaki, Behrouz and Farnadi, Golnoosh and Getoor, Lise}
}

@inproceedings{322,
  title     = {Fairness in Relational Domains},
  booktitle = {Artificial Intelligence, Ethics, and Society (AIES)},
  year      = {2018},
  abstract  = {AI and machine learning tools are being used with increasing frequency for decision making in domains that affect people{\textquoteright}s lives such as employment, education, policing and loan approval. These uses raise concerns about biases of algorithmic discrimination and have motivated the development of fairness-aware machine learning. However, existing fairness approaches are based solely on attributes of individuals. In many cases, discrimination is much more complex, and taking into account the social, organizational, and other connections between individuals is important. We introduce new notions of fairness that are able to capture the relational structure in a domain. We use first-order logic to provide a flexible and expressive language for specifying complex relational patterns of discrimination. Furthermore, we extend an existing statistical relational learning framework, probabilistic soft logic (PSL), to incorporate our definition of relational fairness. We refer to this fairness-aware framework FairPSL. FairPSL makes use of the logical definitions of fairness but also supports a probabilistic interpretation. In particular, we show how to perform maximum a posteriori (MAP) inference by exploiting probabilistic dependencies within the domain while avoiding violation of fairness guarantees. Preliminary empirical evaluation shows that we are able to make both accurate and fair decisions.},
  author    = {Farnadi, Golnoosh and Babaki, Behrouz and Getoor, Lise}
}

@inproceedings{318,
  title     = {Fairness-aware Relational Learning and Inference},
  booktitle = {AAAI Workshop on Declarative Learning Based Programming (DeLBP)},
  year      = {2018},
  author    = {Farnadi, Golnoosh and Babaki, Behrouz and Getoor, Lise}
}