% Normalized from a single-line auto-export:
%   - @conference (deprecated alias) -> @inproceedings
%   - author names in unambiguous "Last, First" form (entry pujara:emnlp17 mixed forms)
%   - proper nouns / acronyms brace-protected in titles so sentence-casing styles keep them
%   - one field per line, aligned
% Citation keys are unchanged so existing \cite commands keep working.

@inproceedings{356,
  title     = {Tandem Inference: An Out-of-Core Streaming Algorithm For Very Large-Scale Relational Inference},
  booktitle = {AAAI Conference on Artificial Intelligence (AAAI)},
  year      = {2020},
  abstract  = {Statistical relational learning (SRL) frameworks allow users to create large, complex graphical models using a compact, rule-based representation. However, these models can quickly become prohibitively large and not fit into machine memory. In this work we address this issue by introducing a novel technique called tandem inference (TI). The primary idea of TI is to combine grounding and inference such that both processes happen in tandem. TI uses an out-of-core streaming approach to overcome memory limitations. Even when memory is not an issue, we show that our proposed approach is able to do inference faster while using less memory than existing approaches. To show the effectiveness of TI, we use a popular SRL framework called Probabilistic Soft Logic (PSL). We implement TI for PSL by proposing a gradient-based inference engine and a streaming approach to grounding. We show that we are able to run an SRL model with over 1B cliques in under nine hours and using only 10 GB of RAM; previous approaches required more than 800 GB for this model and are infeasible on common hardware. To the best of our knowledge, this is the largest SRL model ever run.},
  author    = {Srinivasan, Sriram and Augustine, Eriq and Getoor, Lise}
}

@inproceedings{348,
  title     = {Tractable Probabilistic Reasoning Through Effective Grounding},
  booktitle = {ICML Workshop on Tractable Probabilistic Modeling (TPM)},
  year      = {2019},
  abstract  = {Templated Statistical Relational Learning languages, such as Markov Logic Networks (MLNs) and Probabilistic Soft Logic (PSL), offer much of the expressivity of probabilistic graphical models in a compact form that is intuitive to both experienced modelers and domain experts. However, these languages have historically suffered from tractability issues stemming from the large size of the instantiated models and the complex joint inference performed over these models. Although much research has gone into improving the tractability of these languages using approximate or lifted inference, a relatively small amount of research has gone into improving tractability through efficient instantiation of these large models. In this position paper, we will draw attention to open research areas around efficiently instantiating templated probabilistic models.},
  author    = {Augustine, Eriq and Rekatsinas, Theodoros and Getoor, Lise}
}

@inproceedings{326,
  title     = {A Comparison of Bottom-Up Approaches to Grounding for Templated {Markov} Random Fields},
  booktitle = {Machine Learning and Systems (MLSys)},
  year      = {2018},
  url       = {https://github.com/eriq-augustine/grounding-experiments},
  author    = {Augustine, Eriq and Getoor, Lise}
}

% Was @article with no journal (required field -> BibTeX warning) and the venue
% in `publisher`. This is a tutorial, so @misc + howpublished is the right fit;
% the original venue text is preserved verbatim.
@misc{336,
  title        = {{MLTrain}: Collective Reasoning With Probabilistic Soft Logic},
  year         = {2018},
  howpublished = {Uncertainty in Artificial Intelligence (UAI)},
  url          = {https://github.com/linqs/psl-examples/tree/uai18},
  author       = {Augustine, Eriq and Farnadi, Golnoosh}
}

@inproceedings{pujara:emnlp17,
  title     = {Sparsity and Noise: Where Knowledge Graph Embeddings Fall Short},
  booktitle = {Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year      = {2017},
  url       = {https://github.com/eriq-augustine/meta-kg},
  author    = {Pujara, Jay and Augustine, Eriq and Getoor, Lise}
}