From 8f7e8e036e9dfc53a5c085b5cb8d7ddda7c34db7 Mon Sep 17 00:00:00 2001 From: Charles Dickens Date: Wed, 8 May 2024 08:42:59 -0700 Subject: [PATCH] Update dickens-arxiv24 to dickens-icml24. --- .../{dickens-arxiv24.json => dickens-icml24.json} | 8 +++++--- .../{dickens-arxiv24.pdf => dickens-icml24.pdf} | Bin 2 files changed, 5 insertions(+), 3 deletions(-) rename _data/pubs/{dickens-arxiv24.json => dickens-icml24.json} (88%) rename assets/resources/{dickens-arxiv24.pdf => dickens-icml24.pdf} (100%) diff --git a/_data/pubs/dickens-arxiv24.json b/_data/pubs/dickens-icml24.json similarity index 88% rename from _data/pubs/dickens-arxiv24.json rename to _data/pubs/dickens-icml24.json index eff505c..5a10b3d 100644 --- a/_data/pubs/dickens-arxiv24.json +++ b/_data/pubs/dickens-icml24.json @@ -1,5 +1,5 @@ { - "type": "unpublished", + "type": "conference", "title": "Convex and Bilevel Optimization for Neuro-Symbolic Inference and Learning", "authors": [ "Charles Dickens", @@ -8,12 +8,14 @@ "Stephen Wright", "Lise Getoor" ], - "venue": "arXiv", + "venue": "International Conference on Machine Learning", "year": "2024", + "publisher": "PMLR", + "address": "Vienna, Austria", "links": [ { "label": "paper", - "href": "/assets/resources/dickens-arxiv24.pdf" + "href": "/assets/resources/dickens-icml24.pdf" } ], "abstract": "We address a key challenge for neuro-symbolic (NeSy) systems by leveraging convex and bilevel optimization techniques to develop a general gradient-based framework for end-to-end neural and symbolic parameter learning. The applicability of our framework is demonstrated with NeuPSL, a state-of-the-art NeSy architecture. To achieve this, we propose a smooth primal and dual formulation of NeuPSL inference and show learning gradients are functions of the optimal dual variables. Additionally, we develop a dual block coordinate descent algorithm for the new formulation that naturally exploits warm-starts. 
This leads to over 100× learning runtime improvements over the current best NeuPSL inference method. Finally, we provide extensive empirical evaluations across 8 datasets covering a range of tasks and demonstrate our learning framework achieves up to a 16% point prediction performance improvement over alternative learning methods.", diff --git a/assets/resources/dickens-arxiv24.pdf b/assets/resources/dickens-icml24.pdf similarity index 100% rename from assets/resources/dickens-arxiv24.pdf rename to assets/resources/dickens-icml24.pdf