diff --git "a/iclr2022_poster.jsonl" "b/iclr2022_poster.jsonl" new file mode 100644--- /dev/null +++ "b/iclr2022_poster.jsonl" @@ -0,0 +1,100 @@ +{"id": "HndgQudNb91", "original": "REEuy0zWIoj", "number": 4722, "cdate": 1632948118733, "mdate": null, "ddate": null, "tcdate": 1632948118733, "tmdate": 1750551538166, "tddate": null, "forum": "HndgQudNb91", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Learning to Downsample for Segmentation of Ultra-High Resolution Images", "authorids": ["~Chen_Jin3", "~Ryutaro_Tanno1", "~Thomy_Mertzanidou1", "~Eleftheria_Panagiotaki1", "~Daniel_C._Alexander1"], "authors": ["Chen Jin", "Ryutaro Tanno", "Thomy Mertzanidou", "Eleftheria Panagiotaki", "Daniel C. Alexander"], "keywords": ["ultra-high resolution image segmentation", "non-uniform dowmsampling", "efficient segmentation", "large volume image segmentation", "medical image segmentation"], "abstract": "Many computer vision systems require low-cost segmentation algorithms based on deep learning, either because of the enormous size of input images or limited computational budget. Common solutions uniformly downsample the input images to meet memory constraints, assuming all pixels are equally informative. In this work, we demonstrate that this assumption can harm the segmentation performance\nbecause the segmentation difficulty varies spatially (see Figure 1 “Uniform”). We combat this problem by introducing a learnable downsampling module, which can be optimised together with the given segmentation model in an end-to-end fashion. We formulate the problem of training such downsampling module as optimisation of sampling density distributions over the input images given their low-resolution views. To defend against degenerate solutions (e.g. over-sampling trivial regions like the backgrounds), we propose a regularisation term that encourages the sampling locations to concentrate around the object boundaries. We find the downsampling\nmodule learns to sample more densely at difficult locations, thereby improving the segmentation performance (see Figure 1 \"Ours\"). Our experiments on benchmarks of high-resolution street view, aerial and medical images demonstrate substantial improvements in terms of efficiency-and-accuracy trade-off compared to both uniform downsampling and two recent advanced downsampling techniques.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "jin|learning_to_downsample_for_segmentation_of_ultrahigh_resolution_images", "pdf": "/pdf/d2ade7120315e0521c4b97b593c4a2ebd44b0652.pdf", "one-sentence_summary": "We propose a method for learning to downsample ultra high-resolution images that reflects the importance of each location.", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/learning-to-downsample-for-segmentation-of/code)", "_bibtex": "@inproceedings{\njin2022learning,\ntitle={Learning to Downsample for Segmentation of Ultra-High Resolution Images},\nauthor={Chen Jin and Ryutaro Tanno and Thomy Mertzanidou and Eleftheria Panagiotaki and Daniel C. 
Alexander},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=HndgQudNb91}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "7fFO4cMBx_9", "original": "OU7lRMgefPc", "number": 4721, "cdate": 1632948118673, "mdate": null, "ddate": null, "tcdate": 1632948118673, "tmdate": 1750551538201, "tddate": null, "forum": "7fFO4cMBx_9", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Variational Neural Cellular Automata", "authorids": ["~Rasmus_Berg_Palm1", "~Miguel_González_Duque1", "~Shyam_Sudhakaran1", "~Sebastian_Risi1"], "authors": ["Rasmus Berg Palm", "Miguel González Duque", "Shyam Sudhakaran", "Sebastian Risi"], "keywords": ["Neural Cellular Automata", "Cellular Automata", "Self-Organization", "Generative Models"], "abstract": "In nature, the process of cellular growth and differentiation has lead to an amazing diversity of organisms --- algae, starfish, giant sequoia, tardigrades, and orcas are all created by the same generative process.\nInspired by the incredible diversity of this biological generative process, we propose a generative model, the Variational Neural Cellular Automata (VNCA), which is loosely inspired by the biological processes of cellular growth and differentiation. Unlike previous related works, the VNCA is a proper probabilistic generative model, and we evaluate it according to best practices. We find that the VNCA learns to reconstruct samples well and that despite its relatively few parameters and simple local-only communication, the VNCA can learn to generate a large variety of output from information encoded in a common vector format. While there is a significant gap to the current state-of-the-art in terms of generative modeling performance, we show that the VNCA can learn a purely self-organizing generative process of data. 
Additionally, the self-organizing nature bestows the VNCA with some inherent robustness against perturbations in the early stages of growth.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "palm|variational_neural_cellular_automata", "pdf": "/pdf/abec641c2a0c18536da3345e5cd92d673d90b69d.pdf", "one-sentence_summary": "We propose and evaluate the Variational Neural Cellular Automata, a self-organising generative model based on neural cellular automata", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/variational-neural-cellular-automata/code)", "_bibtex": "@inproceedings{\npalm2022variational,\ntitle={Variational Neural Cellular Automata},\nauthor={Rasmus Berg Palm and Miguel Gonz{\\'a}lez Duque and Shyam Sudhakaran and Sebastian Risi},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=7fFO4cMBx_9}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 17}} +{"id": "FKp8-pIRo3y", "original": "whGXQ0YYNlq", "number": 4719, "cdate": 1632948118548, "mdate": null, "ddate": null, "tcdate": 1632948118548, "tmdate": 1676330442065, "tddate": null, "forum": "FKp8-pIRo3y", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Wish you were here: Hindsight Goal Selection for long-horizon dexterous manipulation", "authorids": ["~Todor_Davchev1", "~Oleg_Olegovich_Sushkov1", "~Jean-Baptiste_Regli1", "~Stefan_Schaal1", "~Yusuf_Aytar1", "~Markus_Wulfmeier1", "~Jon_Scholz1"], "authors": ["Todor Davchev", "Oleg Olegovich Sushkov", "Jean-Baptiste Regli", "Stefan Schaal", "Yusuf Aytar", "Markus Wulfmeier", "Jon Scholz"], "keywords": ["goal-conditioned reinforcement learning", "learning from demonstrations", "long-horizon dexterous manipulation", "bi-manual manipulation"], "abstract": "Complex sequential tasks in continuous-control settings often require agents to successfully traverse a set of ``narrow passages'' in their state space. Solving such tasks with a sparse reward in a sample-efficient manner poses a challenge to modern reinforcement learning (RL) due to the associated long-horizon nature of the problem and the lack of sufficient positive signal during learning. \nVarious tools have been applied to address this challenge. When available, large sets of demonstrations can guide agent exploration. Hindsight relabelling on the other hand does not require additional sources of information. However, existing strategies explore based on task-agnostic goal distributions, which can render the solution of long-horizon tasks impractical. In this work, we extend hindsight relabelling mechanisms to guide exploration along task-specific distributions implied by a small set of successful demonstrations. We evaluate the approach on four complex, single and dual arm, robotics manipulation tasks against strong suitable baselines. The method requires far fewer demonstrations to solve all tasks and achieves a significantly higher overall performance as task complexity increases. 
Finally, we investigate the robustness of the proposed solution with respect to the quality of input representations and the number of demonstrations.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "davchev|wish_you_were_here_hindsight_goal_selection_for_longhorizon_dexterous_manipulation", "pdf": "/pdf/524d4c3cacc5ff7803cd7061b33991511fee7db7.pdf", "supplementary_material": "", "_bibtex": "@inproceedings{\ndavchev2022wish,\ntitle={Wish you were here: Hindsight Goal Selection for long-horizon dexterous manipulation},\nauthor={Todor Davchev and Oleg Olegovich Sushkov and Jean-Baptiste Regli and Stefan Schaal and Yusuf Aytar and Markus Wulfmeier and Jon Scholz},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=FKp8-pIRo3y}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "KntaNRo6R48", "original": "y3mwowfMW6", "number": 4717, "cdate": 1632948118409, "mdate": null, "ddate": null, "tcdate": 1632948118409, "tmdate": 1676330442056, "tddate": null, "forum": "KntaNRo6R48", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "L0-Sparse Canonical Correlation Analysis", "authorids": ["~Ofir_Lindenbaum1", "~Moshe_Salhov1", "~Amir_Averbuch1", "~Yuval_Kluger1"], "authors": ["Ofir Lindenbaum", "Moshe Salhov", "Amir Averbuch", "Yuval Kluger"], "keywords": [], "abstract": "Canonical Correlation Analysis (CCA) models are powerful for studying the associations between two sets of variables. The canonically correlated representations, termed \\textit{canonical variates} are widely used in unsupervised learning to analyze unlabeled multi-modal registered datasets. Despite their success, CCA models may break (or overfit) if the number of variables in either of the modalities exceeds the number of samples. Moreover, often a significant fraction of the variables measures modality-specific information, and thus removing them is beneficial for identifying the \\textit{canonically correlated variates}. Here, we propose $\\ell_0$-CCA, a method for learning correlated representations based on sparse subsets of variables from two observed modalities.\nSparsity is obtained by multiplying the input variables by stochastic gates, whose parameters are learned together with the CCA weights via an $\\ell_0$-regularized correlation loss. \nWe further propose $\\ell_0$-Deep CCA for solving the problem of non-linear sparse CCA by modeling the correlated representations using deep nets. We demonstrate the efficacy of the method using several synthetic and real examples. 
Most notably, by gating nuisance input variables, our approach improves the extracted representations compared to other linear, non-linear and sparse CCA-based models.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "lindenbaum|l0sparse_canonical_correlation_analysis", "pdf": "/pdf/69ae8c04ac43812f7523f009313daec68f09ea3d.pdf", "one-sentence_summary": "We propose a new $\\ell_0$-CCA method for learning correlated representations based on sparse subsets of variables from two observed modalities.", "supplementary_material": "/attachment/8bfa11b6b541f2002ba5319d1a0792a920399814.zip", "_bibtex": "@inproceedings{\nlindenbaum2022lsparse,\ntitle={L0-Sparse Canonical Correlation Analysis},\nauthor={Ofir Lindenbaum and Moshe Salhov and Amir Averbuch and Yuval Kluger},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=KntaNRo6R48}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "B7ZbqNLDn-_", "original": "e_CRxIsYxcO", "number": 4715, "cdate": 1632948118280, "mdate": null, "ddate": null, "tcdate": 1632948118280, "tmdate": 1750551538357, "tddate": null, "forum": "B7ZbqNLDn-_", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Recycling Model Updates in Federated Learning: Are Gradient Subspaces Low-Rank?", "authorids": ["~Sheikh_Shams_Azam1", "~Seyyedali_Hosseinalipour1", "~Qiang_Qiu1", "~Christopher_Brinton1"], "authors": ["Sheikh Shams Azam", "Seyyedali Hosseinalipour", "Qiang Qiu", "Christopher Brinton"], "keywords": ["Distributed Machine Learning", "Federated Learning", "Gradient Subspace", "SGD"], "abstract": "In this paper, we question the rationale behind propagating large numbers of parameters through a distributed system during federated learning. We start by examining the rank characteristics of the subspace spanned by gradients (i.e., the gradient-space) in centralized model training, and observe that the gradient-space often consists of a few leading principal components accounting for an overwhelming majority (95-99%) of the explained variance. Motivated by this, we propose the \"Look-back Gradient Multiplier\" (LBGM) algorithm, which utilizes this low-rank property of the gradient-space in federated learning. Operationally, LBGM recycles the gradients between model update rounds to significantly reduce the number of parameters to be propagated through the system. We analytically characterize the convergence behavior of LBGM, revealing the nature of the trade-off between communication savings and model performance. Our subsequent experimental results demonstrate the improvement LBGM obtains on communication overhead compared to federated learning baselines. 
Additionally, we show that LBGM is a general plug-and-play algorithm that can be used standalone or stacked on top of existing sparsification techniques for distributed model training.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "azam|recycling_model_updates_in_federated_learning_are_gradient_subspaces_lowrank", "pdf": "/pdf/76e2433c08e957e7f19a49e6815d0f6b52da92cd.pdf", "one-sentence_summary": "We observe that \"gradient-space is low rank\" and propose the LBGM algorithm that utilitizes this low-rank property to recycle gradients between model update rounds in federated learning.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/recycling-model-updates-in-federated-learning/code)", "_bibtex": "@inproceedings{\nazam2022recycling,\ntitle={Recycling Model Updates in Federated Learning: Are Gradient Subspaces Low-Rank?},\nauthor={Sheikh Shams Azam and Seyyedali Hosseinalipour and Qiang Qiu and Christopher Brinton},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=B7ZbqNLDn-_}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 24}} +{"id": "ucASPPD9GKN", "original": "WlGq4GsMbws", "number": 4711, "cdate": 1632875770874, "mdate": null, "ddate": null, "tcdate": 1632875770874, "tmdate": 1750551538553, "tddate": null, "forum": "ucASPPD9GKN", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Is Homophily a Necessity for Graph Neural Networks?", "authorids": ["~Yao_Ma3", "~Xiaorui_Liu1", "~Neil_Shah2", "~Jiliang_Tang1"], "authors": ["Yao Ma", "Xiaorui Liu", "Neil Shah", "Jiliang Tang"], "keywords": [], "abstract": "Graph neural networks (GNNs) have shown great prowess in learning representations suitable for numerous graph-based machine learning tasks. When applied to semi-supervised node classification, GNNs are widely believed to work well due to the homophily assumption (``like attracts like''), and fail to generalize to heterophilous graphs where dissimilar nodes connect. Recent works design new architectures to overcome such heterophily-related limitations, citing poor baseline performance and new architecture improvements on a few heterophilous graph benchmark datasets as evidence for this notion. In our experiments, we empirically find that standard graph convolutional networks (GCNs) can actually achieve better performance than such carefully designed methods on some commonly used heterophilous graphs. This motivates us to reconsider whether homophily is truly necessary for good GNN performance. We find that this claim is not quite true, and in fact, GCNs can achieve strong performance on heterophilous graphs under certain conditions. Our work carefully characterizes these conditions and provides supporting theoretical understanding and empirical observations. 
Finally, we examine existing heterophilous graphs benchmarks and reconcile how the GCN (under)performs on them based on this understanding.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ma|is_homophily_a_necessity_for_graph_neural_networks", "pdf": "/pdf/dba6b2a528efebfb036a0b908ecfc59201204429.pdf", "supplementary_material": "/attachment/bbde85db7c61a5770e7eb7b6c2abeea42fa8e857.zip", "data": "", "code": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/is-homophily-a-necessity-for-graph-neural/code)", "_bibtex": "@inproceedings{\nma2022is,\ntitle={Is Homophily a Necessity for Graph Neural Networks?},\nauthor={Yao Ma and Xiaorui Liu and Neil Shah and Jiliang Tang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=ucASPPD9GKN}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 21}} +{"id": "Ve0Wth3ptT_", "original": "JdgIYu5GQua", "number": 4703, "cdate": 1632875770333, "mdate": null, "ddate": null, "tcdate": 1632875770333, "tmdate": 1676330442095, "tddate": null, "forum": "Ve0Wth3ptT_", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "DEGREE: Decomposition Based Explanation for Graph Neural Networks", "authorids": ["~Qizhang_Feng1", "~Ninghao_Liu2", "~Fan_Yang27", "~Ruixiang_Tang1", "~Mengnan_Du1", "~Xia_Hu4"], "authors": ["Qizhang Feng", "Ninghao Liu", "Fan Yang", "Ruixiang Tang", "Mengnan Du", "Xia Hu"], "keywords": ["XAI", "GNN"], "abstract": "Graph Neural Networks (GNNs) are gaining extensive attention for their application in graph data. However, the black-box nature of GNNs prevents users from understanding and trusting the models, thus hampering their applicability. Whereas explaining GNNs remains a challenge, most existing methods fall into approximation based and perturbation based approaches with suffer from faithfulness problems and unnatural artifacts respectively. To tackle these problems, we propose DEGREE (Decomposition based Explanation for GRaph nEural nEtworks) to provide a faithful explanation for GNN predictions. By decomposing the information generation and aggregation mechanism of GNNs, DEGREE allows tracking the contributions of specific components of the input graph to the final prediction. Based on this, we further design a subgraph level interpretation algorithm to reveal complex interactions between graph nodes that are overlooked by previous methods. The efficiency of our algorithm can be further improved by utilizing GNN characteristics. 
Finally, we conduct quantitative and qualitative experiments on synthetic and real-world datasets to demonstrate the effectiveness of DEGREE on node classification and graph classification tasks.", "one-sentence_summary": "We propose a new decomposition based explanation for Graph Neural Networks.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "feng|degree_decomposition_based_explanation_for_graph_neural_networks", "pdf": "/pdf/fd7de8640028480fa9fe56dd9ed7bcad9182bf31.pdf", "data": "", "_bibtex": "@inproceedings{\nfeng2022degree,\ntitle={{DEGREE}: Decomposition Based Explanation for Graph Neural Networks},\nauthor={Qizhang Feng and Ninghao Liu and Fan Yang and Ruixiang Tang and Mengnan Du and Xia Hu},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Ve0Wth3ptT_}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 17}} +{"id": "T0B9AoM_bFg", "original": "owIGL-Id-AN", "number": 4668, "cdate": 1632875768056, "mdate": null, "ddate": null, "tcdate": 1632875768056, "tmdate": 1676330443177, "tddate": null, "forum": "T0B9AoM_bFg", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Improving Mutual Information Estimation with Annealed and Energy-Based Bounds", "authorids": ["~Rob_Brekelmans1", "~Sicong_Huang1", "~Marzyeh_Ghassemi2", "~Greg_Ver_Steeg1", "~Roger_Baker_Grosse1", "~Alireza_Makhzani1"], "authors": ["Rob Brekelmans", "Sicong Huang", "Marzyeh Ghassemi", "Greg Ver Steeg", "Roger Baker Grosse", "Alireza Makhzani"], "keywords": ["mutual information estimation", "annealed importance sampling", "energy-based models"], "abstract": "Mutual information (MI) is a fundamental quantity in information theory and machine learning. However, direct estimation of MI is intractable, even if the true joint probability density for the variables of interest is known, as it involves estimating a potentially high-dimensional log partition function. In this work, we present a unifying view of existing MI bounds from the perspective of importance sampling, and propose three novel bounds based on this approach. Since a tight MI bound without density information requires a sample size exponential in the true MI, we assume either a single marginal or the full joint density information is known. In settings where the full joint density is available, we propose Multi-Sample Annealed Importance Sampling (AIS) bounds on MI, which we demonstrate can tightly estimate large values of MI in our experiments. In settings where only a single marginal distribution is known, we propose Generalized IWAE (GIWAE) and MINE-AIS bounds. Our GIWAE bound unifies variational and contrastive bounds in a single framework that generalizes InfoNCE, IWAE, and Barber-Agakov bounds. Our MINE-AIS method improves upon existing energy-based methods such as MINE-DV and MINE-F by directly optimizing a tighter lower bound on MI. MINE-AIS uses MCMC sampling to estimate gradients for training and Multi-Sample AIS for evaluating the bound. Our methods are particularly suitable for evaluating MI in deep generative models, since explicit forms of the marginal or joint densities are often available. 
We evaluate our bounds on estimating the MI of VAEs and GANs trained on the MNIST and CIFAR datasets, and showcase significant gains over existing bounds in these challenging settings with high ground truth MI.", "one-sentence_summary": "We derive new annealed importance sampling and energy-based bounds, resulting in vastly more accurate estimates of mutual information.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "brekelmans|improving_mutual_information_estimation_with_annealed_and_energybased_bounds", "pdf": "/pdf/a68f8e4bbad21f5599f372c94827c5f596c6555b.pdf", "data": "", "_bibtex": "@inproceedings{\nbrekelmans2022improving,\ntitle={Improving Mutual Information Estimation with Annealed and Energy-Based Bounds},\nauthor={Rob Brekelmans and Sicong Huang and Marzyeh Ghassemi and Greg Ver Steeg and Roger Baker Grosse and Alireza Makhzani},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=T0B9AoM_bFg}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 8}} +{"id": "bp-LJ4y_XC", "original": "VqLa5ifS_oQ", "number": 4662, "cdate": 1632875767646, "mdate": null, "ddate": null, "tcdate": 1632875767646, "tmdate": 1676330443199, "tddate": null, "forum": "bp-LJ4y_XC", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Sequence Approximation using Feedforward Spiking Neural Network for Spatiotemporal Learning: Theory and Optimization Methods", "authorids": ["~Xueyuan_She1", "~Saurabh_Dash1", "~Saibal_Mukhopadhyay1"], "authors": ["Xueyuan She", "Saurabh Dash", "Saibal Mukhopadhyay"], "keywords": ["spiking neural network", "spatiotemporal processing", "feedforward network"], "abstract": "A dynamical system of spiking neurons with only feedforward connections can classify spatiotemporal patterns without recurrent connections. However, the theoretical construct of a feedforward spiking neural network (SNN) for approximating a temporal sequence remains unclear, making it challenging to optimize SNN architectures for learning complex spatiotemporal patterns. In this work, we establish a theoretical framework to understand and improve sequence approximation using a feedforward SNN. Our framework shows that a feedforward SNN with one neuron per layer and skip-layer connections can approximate the mapping function between any arbitrary pairs of input and output spike train on a compact domain. Moreover, we prove that heterogeneous neurons with varying dynamics and skip-layer connections improve sequence approximation using feedforward SNN. Consequently, we propose SNN architectures incorporating the preceding constructs that are trained using supervised backpropagation-through-time (BPTT) and unsupervised spiking-timing-dependent plasticity (STDP) algorithms for classification of spatiotemporal data. A dual-search-space Bayesian optimization method is developed to optimize architecture and parameters of the proposed SNN with heterogeneous neuron dynamics and skip-layer connections. 
", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "she|sequence_approximation_using_feedforward_spiking_neural_network_for_spatiotemporal_learning_theory_and_optimization_methods", "pdf": "/pdf/043f00a3e618d0c71bbd79dffbdfdaf6d9fd4d1b.pdf", "one-sentence_summary": "A theoretical approache to study the approximation capability of feedforward spiking neural network and optimization methods for such network.", "supplementary_material": "/attachment/07bfc11fcbf1f0d165886d9670fb0314518d9a36.zip", "_bibtex": "@inproceedings{\nshe2022sequence,\ntitle={Sequence Approximation using Feedforward Spiking Neural Network for Spatiotemporal Learning: Theory and Optimization Methods},\nauthor={Xueyuan She and Saurabh Dash and Saibal Mukhopadhyay},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=bp-LJ4y_XC}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 17}} +{"id": "nwKXyFvaUm", "original": "jJprcUgbTo6", "number": 4660, "cdate": 1632875767512, "mdate": null, "ddate": null, "tcdate": 1632875767512, "tmdate": 1676330443254, "tddate": null, "forum": "nwKXyFvaUm", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Diverse Client Selection for Federated Learning via Submodular Maximization", "authorids": ["~Ravikumar_Balakrishnan1", "~Tian_Li1", "~Tianyi_Zhou1", "nageen.himayat@intel.com", "~Virginia_Smith1", "~Jeff_Bilmes1"], "authors": ["Ravikumar Balakrishnan", "Tian Li", "Tianyi Zhou", "Nageen Himayat", "Virginia Smith", "Jeff Bilmes"], "keywords": ["federated learning", "submodularity", "diversity"], "abstract": "In every communication round of federated learning, a random subset of clients communicate their model updates back to the server which then aggregates them all. The optimal size of this subset is not known and several studies have shown that typically random selection does not perform very well in terms of convergence, learning efficiency and fairness. We, in this paper, propose to select a small diverse subset of clients, namely those carrying representative gradient information, and we transmit only these updates to the server. Our aim is for updating via only a subset to approximate updating via aggregating all client information. We achieve this by choosing a subset that maximizes a submodular facility location function defined over gradient space. We introduce “federated averaging with diverse client selection (DivFL)”. We provide a thorough analysis of its convergence in the heterogeneous setting and apply it both to synthetic and to real datasets. Empirical results show several benefits to our approach including improved learning efficiency, faster convergence and also more uniform (i.e., fair) performance across clients. 
We further show a communication-efficient version of DivFL that can still outperform baselines on the above metrics.", "one-sentence_summary": "The paper addresses a key challenge of selecting the most representative clients iteratively for federated learning through formulating it as a submodular optimization problem and developing efficient algorithms.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "balakrishnan|diverse_client_selection_for_federated_learning_via_submodular_maximization", "pdf": "/pdf/4d539789e55d133a96781cda576be4ab34ec5982.pdf", "_bibtex": "@inproceedings{\nbalakrishnan2022diverse,\ntitle={Diverse Client Selection for Federated Learning via Submodular Maximization},\nauthor={Ravikumar Balakrishnan and Tian Li and Tianyi Zhou and Nageen Himayat and Virginia Smith and Jeff Bilmes},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=nwKXyFvaUm}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "jT1EwXu-4hj", "original": "AJiP8QDRsVH", "number": 4651, "cdate": 1632875766895, "mdate": null, "ddate": null, "tcdate": 1632875766895, "tmdate": 1750551539122, "tddate": null, "forum": "jT1EwXu-4hj", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "From Intervention to Domain Transportation: A Novel Perspective to Optimize Recommendation", "authorids": ["~Da_Xu2", "~Yuting_Ye3", "~Chuanwei_Ruan1", "~Evren_Korpeoglu1", "~Sushant_Kumar1", "~Kannan_Achan1"], "authors": ["Da Xu", "Yuting Ye", "Chuanwei Ruan", "Evren Korpeoglu", "Sushant Kumar", "Kannan Achan"], "keywords": ["Information retrieval", "Learning theory", "Causal inference", "Missing data", "Overlapping", "Reweighting", "Optimal transport"], "abstract": "The interventional nature of recommendation has attracted increasing attention in recent years. It particularly motivates researchers to formulate learning and evaluating recommendation as causal inference and data missing-not-at-random problems. However, few take seriously the consequence of violating the critical assumption of overlapping, which we prove can significantly threaten the validity and interpretation of the outcome. We find a critical piece missing in the current understanding of information retrieval (IR) systems: as interventions, recommendation not only affects the already observed data, but it also interferes with the target domain (distribution) of interest. We then rephrase optimizing recommendation as finding an intervention that best transports the patterns it learns from the observed domain to its intervention domain. Towards this end, we use domain transportation to characterize the learning-intervention mechanism of recommendation. We design a principled transportation-constraint risk minimization objective and convert it to a two-player minimax game.\nWe prove the consistency, generalization, and excessive risk bounds for the proposed objective, and elaborate how they compare to the current results. 
Finally, we carry out extensive real-data and semi-synthetic experiments to demonstrate the advantage of our approach, and launch online testing with a real-world IR system.", "one-sentence_summary": "We propose and study a novel domain-transportation view for optimizing recommendation for information retrieval systems.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "xu|from_intervention_to_domain_transportation_a_novel_perspective_to_optimize_recommendation", "pdf": "/pdf/22322b458fd437ff0b3cf13debd29cc381b25ccc.pdf", "supplementary_material": "/attachment/4b6d99ed3156754a98761a428ef733a1d68bf4ed.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 5 code implementations](https://www.catalyzex.com/paper/from-intervention-to-domain-transportation-a/code)", "_bibtex": "@inproceedings{\nxu2022from,\ntitle={From Intervention to Domain Transportation: A Novel Perspective to Optimize Recommendation},\nauthor={Da Xu and Yuting Ye and Chuanwei Ruan and Evren Korpeoglu and Sushant Kumar and Kannan Achan},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=jT1EwXu-4hj}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "JxFgJbZ-wft", "original": "pqcDjdr1zCf", "number": 4647, "cdate": 1632875766626, "mdate": null, "ddate": null, "tcdate": 1632875766626, "tmdate": 1750551539189, "tddate": null, "forum": "JxFgJbZ-wft", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Variational Predictive Routing with Nested Subjective Timescales", "authorids": ["~Alexey_Zakharov1", "~Qinghai_Guo1", "~Zafeirios_Fountas1"], "authors": ["Alexey Zakharov", "Qinghai Guo", "Zafeirios Fountas"], "keywords": ["Hierarchical temporal abstraction", "event discovery", "hierarchical generative models", "variational inference"], "abstract": "Discovery and learning of an underlying spatiotemporal hierarchy in sequential data is an important topic for machine learning. Despite this, little work has been done to explore hierarchical generative models that can flexibly adapt their layerwise representations in response to datasets with different temporal dynamics. Here, we present Variational Predictive Routing (VPR) – a neural probabilistic inference system that organizes latent representations of video features in a temporal hierarchy, based on their rates of change, thus modeling continuous data as a hierarchical renewal process. By employing an event detection mechanism that relies solely on the system’s latent representations (without the need of a separate model), VPR is able to dynamically adjust its internal state following changes in the observed features, promoting an optimal organisation of representations across the levels of the model’s latent hierarchy. Using several video datasets, we show that VPR is able to detect event boundaries, disentangle spatiotemporal features across its hierarchy, adapt to the dynamics of the data, and produce accurate time-agnostic rollouts of the future. 
Our approach integrates insights from neuroscience and introduces a framework with high potential for applications in model-based reinforcement learning, where flexible and informative state-space rollouts are of particular interest.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "zakharov|variational_predictive_routing_with_nested_subjective_timescales", "pdf": "/pdf/712c74938a55973dd0b3f46e154fc0696194b578.pdf", "one-sentence_summary": "Variational inference hierarchical model that relies on a change detection mechanism to impose a nested temporal hierarchy on its latent structure.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/variational-predictive-routing-with-nested/code)", "_bibtex": "@inproceedings{\nzakharov2022variational,\ntitle={Variational Predictive Routing with Nested Subjective Timescales},\nauthor={Alexey Zakharov and Qinghai Guo and Zafeirios Fountas},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=JxFgJbZ-wft}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 27}} +{"id": "RhB1AdoFfGE", "original": "IVBi_OS_zA", "number": 4630, "cdate": 1632875765523, "mdate": null, "ddate": null, "tcdate": 1632875765523, "tmdate": 1750551539576, "tddate": null, "forum": "RhB1AdoFfGE", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Sample and Computation Redistribution for Efficient Face Detection", "authorids": ["~Jia_Guo1", "~Jiankang_Deng1", "~Alexandros_Lattas1", "~Stefanos_Zafeiriou1"], "authors": ["Jia Guo", "Jiankang Deng", "Alexandros Lattas", "Stefanos Zafeiriou"], "keywords": ["efficient face detection", "computation redistribution", "sample redistribution"], "abstract": "Although tremendous strides have been made in uncontrolled face detection, accurate face detection with a low computation cost remains an open challenge. In this paper, we point out that computation distribution and scale augmentation are the keys to detecting small faces from low-resolution images. Motivated by these observations, we introduce two simple but effective methods: (1) Computation Redistribution (CR), which reallocates the computation between the backbone, neck and head of the model; and (2) Sample Redistribution (SR), which augments training samples for the most needed stages. The proposed Sample and Computation Redistribution for Face Detection (SCRFD) is implemented by a random search in a meticulously designed search space. Extensive experiments conducted on WIDER FACE demonstrate the state-of-the-art accuracy-efficiency trade-off for the proposed SCRFD family across a wide range of compute regimes. In particular, SCRFD-34GF outperforms the best competitor, TinaFace, by $4.78\\%$ (AP at hard set) while being more than 3$\\times$ faster on GPUs with VGA-resolution images. 
Code is available at: https://github.com/deepinsight/insightface/tree/master/detection/scrfd.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "guo|sample_and_computation_redistribution_for_efficient_face_detection", "pdf": "/pdf/d7b9dd38011f418b1c66bb378aef38a25d8c9bf5.pdf", "one-sentence_summary": "We search for optimised computation distribution and training sample distribution for the task of face detection.", "supplementary_material": "/attachment/f9c1bf1166045e338636898678f68d9057ac2060.zip", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/sample-and-computation-redistribution-for/code)", "_bibtex": "@inproceedings{\nguo2022sample,\ntitle={Sample and Computation Redistribution for Efficient Face Detection},\nauthor={Jia Guo and Jiankang Deng and Alexandros Lattas and Stefanos Zafeiriou},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=RhB1AdoFfGE}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "NkZq4OEYN-", "original": "uo_XNKf3A6V", "number": 4629, "cdate": 1632875765455, "mdate": null, "ddate": null, "tcdate": 1632875765455, "tmdate": 1750551539935, "tddate": null, "forum": "NkZq4OEYN-", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Sound Adversarial Audio-Visual Navigation", "authorids": ["~Yinfeng_Yu1", "~Wenbing_Huang1", "~Fuchun_Sun2", "~Changan_Chen2", "~Yikai_Wang2", "~Xiaohong_Liu3"], "authors": ["Yinfeng Yu", "Wenbing Huang", "Fuchun Sun", "Changan Chen", "Yikai Wang", "Xiaohong Liu"], "keywords": [], "abstract": "Audio-visual navigation task requires an agent to find a sound source in a realistic, unmapped 3D environment by utilizing egocentric audio-visual observations. Existing audio-visual navigation works assume a clean environment that solely contains the target sound, which, however, would not be suitable in most real-world applications due to the unexpected sound noise or intentional interference. In this work, we design an acoustically complex environment in which, besides the target sound, there exists a sound attacker playing a zero-sum game with the agent. More specifically, the attacker can move and change the volume and category of the sound to make the agent suffer from finding the sounding object while the agent tries to dodge the attack and navigate to the goal under the intervention. Under certain constraints to the attacker, we can improve the robustness of the agent towards unexpected sound attacks in audio-visual navigation. For better convergence, we develop a joint training mechanism by employing the property of a centralized critic with decentralized actors. Experiments on two real-world 3D scan datasets, Replica, and Matterport3D, verify the effectiveness and the robustness of the agent trained under our designed environment when transferred to the clean environment or the one containing sound attackers with random policy. 
Project: https://yyf17.github.io/SAAVN .", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "yu|sound_adversarial_audiovisual_navigation", "pdf": "/pdf/892cdd541646cc28a0880494951fbd89079c2a3d.pdf", "one-sentence_summary": "This work aims to do an adversarial sound intervention for robust audio-visual navigation.", "supplementary_material": "/attachment/7645c11196a90a3cf589f48b53a0b82fb5a56a1c.zip", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/sound-adversarial-audio-visual-navigation/code)", "_bibtex": "@inproceedings{\nyu2022sound,\ntitle={Sound Adversarial Audio-Visual Navigation},\nauthor={Yinfeng Yu and Wenbing Huang and Fuchun Sun and Changan Chen and Yikai Wang and Xiaohong Liu},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=NkZq4OEYN-}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 6}} +{"id": "12RoR2o32T", "original": "sXdqZAvtLR8v", "number": 4618, "cdate": 1632875764764, "mdate": null, "ddate": null, "tcdate": 1632875764764, "tmdate": 1676330444548, "tddate": null, "forum": "12RoR2o32T", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Out-of-distribution Generalization in the Presence of Nuisance-Induced Spurious Correlations", "authorids": ["~Aahlad_Manas_Puli1", "~Lily_H_Zhang1", "~Eric_Karl_Oermann1", "~Rajesh_Ranganath2"], "authors": ["Aahlad Manas Puli", "Lily H Zhang", "Eric Karl Oermann", "Rajesh Ranganath"], "keywords": ["spurious correlations", "out of distribution generalization", "ml for health", "representation learning"], "abstract": "In many prediction problems, spurious correlations are induced by a changing relationship between the label and a nuisance variable that is also correlated with the covariates. For example, in classifying animals in natural images, the background, which is a nuisance, can predict the type of animal. This nuisance-label relationship does not always hold, and the performance of a model trained under one such relationship may be poor on data with a different nuisance-label relationship. To build predictive models that perform well regardless of the nuisance-label relationship, we develop Nuisance-Randomized Distillation (NURD). We introduce the nuisance-randomized distribution, a distribution where the nuisance and the label are independent. Under this distribution, we define the set of representations such that conditioning on any member, the nuisance and the label remain independent. We prove that the representations in this set always perform better than chance, while representations outside of this set may not. NURD finds a representation from this set that is most informative of the label under the nuisance-randomized distribution, and we prove that this representation achieves the highest performance regardless of the nuisance-label relationship. 
We evaluate NURD on several tasks including chest X-ray classification where, using non-lung patches as the nuisance, NURD produces models that predict pneumonia under strong spurious correlations.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "puli|outofdistribution_generalization_in_the_presence_of_nuisanceinduced_spurious_correlations", "pdf": "/pdf/7128d52f12e20439db2d07083f3de3995967bb53.pdf", "one-sentence_summary": "This paper build models robust to nuisance-induced spurious correlations by constructing a representation that distills out the influence of the nuisance variables, while also maximizing its information with the label.", "data": "", "_bibtex": "@inproceedings{\npuli2022outofdistribution,\ntitle={Out-of-distribution Generalization in the Presence of Nuisance-Induced Spurious Correlations},\nauthor={Aahlad Manas Puli and Lily H Zhang and Eric Karl Oermann and Rajesh Ranganath},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=12RoR2o32T}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 6}} +{"id": "OM_lYiHXiCL", "original": "BmArmBBCwr", "number": 4615, "cdate": 1632875764556, "mdate": null, "ddate": null, "tcdate": 1632875764556, "tmdate": 1750551540078, "tddate": null, "forum": "OM_lYiHXiCL", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "AEVA: Black-box Backdoor Detection Using Adversarial Extreme Value Analysis", "authorids": ["~Junfeng_Guo2", "~Ang_Li1", "~Cong_Liu2"], "authors": ["Junfeng Guo", "Ang Li", "Cong Liu"], "keywords": [], "abstract": "Deep neural networks (DNNs) are proved to be vulnerable against backdoor attacks. A backdoor could be embedded in the target DNNs through injecting a backdoor trigger into the training examples, which can cause the target DNNs misclassify an input attached with the backdoor trigger. Recent backdoor detection methods often require the access to the original poisoned training data, the parameters of the target DNNs, or the predictive confidence for each given input, which are impractical in many real-world applications, e.g., on-device de-ployed DNNs. We address the black-box hard-label backdoor detection problem where the DNN is a fully black-box and only its final output label is accessible. We approach this problem from the optimization perspective and show that the objective of backdoor detection is bounded by an adversarial objective. Further theoretical and empirical studies reveal that this adversarial objective leads to a solution with highly skewed distribution; a singularity is often observed in the adversarial map of a backdoor-infected example, which we call the adversarial singularity phenomenon. Based on this observation, we propose the adversarial extreme value analysis(AEVA) algorithm to detect backdoors in black-box neural networks. The AEVA algorithm is based on an extreme value analysis on the adversarial map, computed from the monte-carlo gradient estimation due to the black-box hard-label constraint. 
Evidenced by extensive experiments across three popular tasks and backdoor attacks, our approach is shown effective in detecting backdoor attacks under the black-box hard-label scenarios", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "guo|aeva_blackbox_backdoor_detection_using_adversarial_extreme_value_analysis", "pdf": "/pdf/b8ad85b4ddd615a5abac4d7c1d5713fc92b9f0e9.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/aeva-black-box-backdoor-detection-using/code)", "_bibtex": "@inproceedings{\nguo2022aeva,\ntitle={{AEVA}: Black-box Backdoor Detection Using Adversarial Extreme Value Analysis},\nauthor={Junfeng Guo and Ang Li and Cong Liu},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=OM_lYiHXiCL}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 7}} +{"id": "5ECQL05ub0J", "original": "g9Kfym-I2Qa", "number": 4609, "cdate": 1632875764141, "mdate": null, "ddate": null, "tcdate": 1632875764141, "tmdate": 1676330444818, "tddate": null, "forum": "5ECQL05ub0J", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Resonance in Weight Space: Covariate Shift Can Drive Divergence of SGD with Momentum", "authorids": ["~Kirby_Banman1", "~Garnet_Liam_Peet-Pare1", "~Nidhi_Hegde1", "~Alona_Fyshe1", "~Martha_White1"], "authors": ["Kirby Banman", "Garnet Liam Peet-Pare", "Nidhi Hegde", "Alona Fyshe", "Martha White"], "keywords": ["optimization", "momentum", "stochastic gradient descent", "non-iid sampling"], "abstract": "Most convergence guarantees for stochastic gradient descent with momentum (SGDm) rely on iid sampling. Yet, SGDm is often used outside this regime, in settings with temporally correlated input samples such as continual learning and reinforcement learning. Existing work has shown that SGDm with a decaying step-size can converge under Markovian temporal correlation. In this work, we show that SGDm under covariate shift with a fixed step-size can be unstable and diverge. In particular, we show SGDm under covariate shift is a parametric oscillator, and so can suffer from a phenomenon known as resonance. We approximate the learning system as a time varying system of ordinary differential equations, and leverage existing theory to characterize the system's divergence/convergence as resonant/nonresonant modes. 
The theoretical result is limited to the linear setting with periodic covariate shift, so we empirically supplement this result to show that resonance phenomena persist even under non-periodic covariate shift, nonlinear dynamics with neural networks, and optimizers other than SGDm.", "one-sentence_summary": "We show that SGDm under covariate shift with fixed step-size can be unstable and diverge due to a phenomenon known as parametric resonance.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "banman|resonance_in_weight_space_covariate_shift_can_drive_divergence_of_sgd_with_momentum", "pdf": "/pdf/967691b8c1cb517500d87dfd7dbf7dd6293c0e89.pdf", "supplementary_material": "/attachment/0945c42d80cb1110d6da871961508649b30acbe7.zip", "_bibtex": "@inproceedings{\nbanman2022resonance,\ntitle={Resonance in Weight Space: Covariate Shift Can Drive Divergence of {SGD} with Momentum},\nauthor={Kirby Banman and Garnet Liam Peet-Pare and Nidhi Hegde and Alona Fyshe and Martha White},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=5ECQL05ub0J}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 15}} +{"id": "WqoBaaPHS-", "original": "R9VjyF_alUQ", "number": 4592, "cdate": 1632875762987, "mdate": null, "ddate": null, "tcdate": 1632875762987, "tmdate": 1676330445201, "tddate": null, "forum": "WqoBaaPHS-", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Top-label calibration and multiclass-to-binary reductions", "authorids": ["~Chirag_Gupta1", "~Aaditya_Ramdas2"], "authors": ["Chirag Gupta", "Aaditya Ramdas"], "keywords": ["calibration", "multiclass", "uncertainty quantification", "distribution-free", "histogram binning"], "abstract": "We propose a new notion of multiclass calibration called top-label calibration. A classifier is said to be top-label calibrated if the reported probability for the predicted class label---the top-label---is calibrated, conditioned on the top-label. This conditioning is essential for practical utility of the calibration property, since the top-label is always reported and we must condition on what is reported. However, the popular notion of confidence calibration erroneously skips this conditioning. Furthermore, we outline a multiclass-to-binary (M2B) reduction framework that unifies confidence, top-label, and class-wise calibration, among others. As its name suggests, M2B works by reducing multiclass calibration to different binary calibration problems; various types of multiclass calibration can then be achieved using simple binary calibration routines. We instantiate the M2B framework with the well-studied histogram binning (HB) binary calibrator, and prove that the overall procedure is multiclass calibrated without making any assumptions on the underlying data distribution. In an empirical evaluation with four deep net architectures on CIFAR-10 and CIFAR-100, we find that the M2B + HB procedure achieves lower top-label and class-wise calibration error than other approaches such as temperature scaling. 
Code for this work is available at https://github.com/aigen/df-posthoc-calibration.", "one-sentence_summary": "We propose top-label calibration, a new and arguably natural notion for multiclass calibration, along with 'wrapper' calibration algorithms that reduce multiclass calibration to binary calibration.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "gupta|toplabel_calibration_and_multiclasstobinary_reductions", "pdf": "/pdf/a580ad8d84d1a31adcccb9f9e2102c3b503121df.pdf", "supplementary_material": "/attachment/8c0bb4e26bc860bba21f67effcdb9613904271cd.zip", "code": "", "data": "", "_bibtex": "@inproceedings{\ngupta2022toplabel,\ntitle={Top-label calibration and multiclass-to-binary reductions},\nauthor={Chirag Gupta and Aaditya Ramdas},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=WqoBaaPHS-}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 19}} +{"id": "JfaWawZ8BmX", "original": "lF-Kjj8LkOP", "number": 4589, "cdate": 1632875762790, "mdate": null, "ddate": null, "tcdate": 1632875762790, "tmdate": 1676330445354, "tddate": null, "forum": "JfaWawZ8BmX", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Anisotropic Random Feature Regression in High Dimensions", "authorids": ["~Gabriel_Mel1", "~Jeffrey_Pennington1"], "authors": ["Gabriel Mel", "Jeffrey Pennington"], "keywords": ["random feature models", "high dimensional asymptotics", "generalization", "learning curves", "double descent", "multiple descent", "alignment"], "abstract": "In contrast to standard statistical wisdom, modern learning algorithms typically find their best performance in the overparameterized regime in which the model has many more parameters than needed to fit the training data. A growing number of recent works have shown that random feature models can offer a detailed theoretical explanation for this unexpected behavior, but typically these analyses have utilized isotropic distributional assumptions on the underlying data generation process, thereby failing to provide a realistic characterization of real-world models that are designed to identify and harness the structure in natural data. In this work, we examine the high-dimensional asymptotics of random feature regression in the presence of structured data, allowing for arbitrary input correlations and arbitrary alignment between the data and the weights of the target function. We define a partial order on the space of weight-data alignments and prove that generalization performance improves in response to stronger alignment. We also clarify several previous observations in the literature by distinguishing the behavior of the sample-wise and parameter-wise learning curves, finding that sample-wise multiple descent can occur at scales dictated by the eigenstructure of the data covariance, but that parameter-wise multiple descent is limited to double descent, although strong anisotropy can induce additional signatures such as wide plateaus and steep cliffs. 
Finally, these signatures are related to phase transitions in the spectrum of the feature kernel matrix, and unlike the double descent peak, persist even under optimal regularization.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "mel|anisotropic_random_feature_regression_in_high_dimensions", "pdf": "/pdf/bc2ddad146bd93609c8510aac28ae824072d1832.pdf", "one-sentence_summary": "We derive exact asymptotic formulas for the total error, bias, and variance of random feature regression with anisotropic inputs and target weights, and identify a new type of singularity in sample-wise learning curves. ", "supplementary_material": "/attachment/3b2a48f01870eb81d540e9c5ce4f7b87bd016e94.zip", "_bibtex": "@inproceedings{\nmel2022anisotropic,\ntitle={Anisotropic Random Feature Regression in High Dimensions},\nauthor={Gabriel Mel and Jeffrey Pennington},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=JfaWawZ8BmX}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 20}} +{"id": "L01Nn_VJ9i", "original": "RLGcEEsJEgG", "number": 4586, "cdate": 1632875762583, "mdate": null, "ddate": null, "tcdate": 1632875762583, "tmdate": 1750551540939, "tddate": null, "forum": "L01Nn_VJ9i", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Back2Future: Leveraging Backfill Dynamics for Improving Real-time Predictions in Future", "authorids": ["~Harshavardhan_Kamarthi1", "~Alexander_Rodríguez1", "~B._Aditya_Prakash2"], "authors": ["Harshavardhan Kamarthi", "Alexander Rodríguez", "B. Aditya Prakash"], "keywords": ["Epidemic Forecasting", "Data revisions", "Graph Representation learning", "Time Series Forecasting"], "abstract": "For real-time forecasting in domains like public health and macroeconomics, data collection is a non-trivial and demanding task. Often after being initially released, it undergoes several revisions later (maybe due to human or technical constraints) - as a result, it may take weeks until the data reaches a stable value. This so-called ‘backfill’ phenomenon and its effect on model performance have been barely addressed in the prior literature. In this paper, we introduce the multi-variate backfill problem using COVID-19 as the motivating example. \nWe construct a detailed dataset composed of relevant signals over the past year of the pandemic. \nWe then systematically characterize several patterns in backfill dynamics and leverage our observations for formulating a novel problem and neural framework, Back2Future, that aims to refines a given model's predictions in real-time. Our extensive experiments demonstrate that our method refines the performance of the diverse set of top models for COVID-19 forecasting and GDP growth forecasting. Specifically, we show that Back2Future refined top COVID-19 models by 6.65% to 11.24% and yield an 18% improvement over non-trivial baselines. 
In addition, we show that our model improves model evaluation too; hence policy-makers can better understand the true accuracy of forecasting models in real-time.", "one-sentence_summary": "We study the problem of multi-variate backfill for both features and targets and show how to leverage our insights for more general neural framework to improve both model predictions and evaluation", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "kamarthi|back2future_leveraging_backfill_dynamics_for_improving_realtime_predictions_in_future", "pdf": "/pdf/5ff5a41a0773c6764d009a86a74cce3dd35e8ec3.pdf", "supplementary_material": "/attachment/2484a196fd983b98ffd8c549360fbe0f13543350.zip", "code": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/back2future-leveraging-backfill-dynamics-for/code)", "_bibtex": "@inproceedings{\nkamarthi2022backfuture,\ntitle={Back2Future: Leveraging Backfill Dynamics for Improving Real-time Predictions in Future},\nauthor={Harshavardhan Kamarthi and Alexander Rodr{\\'\\i}guez and B. Aditya Prakash},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=L01Nn_VJ9i}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 8}} +{"id": "lrocYB-0ST2", "original": "zFLFovIhB4E", "number": 4570, "cdate": 1632875761524, "mdate": null, "ddate": null, "tcdate": 1632875761524, "tmdate": 1676330446207, "tddate": null, "forum": "lrocYB-0ST2", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Approximation and Learning with Deep Convolutional Models: a Kernel Perspective", "authorids": ["~Alberto_Bietti1"], "authors": ["Alberto Bietti"], "keywords": ["kernel methods", "deep learning theory", "convolution", "approximation", "generalization"], "abstract": "The empirical success of deep convolutional networks on tasks involving high-dimensional data such as images or audio suggests that they can efficiently approximate certain functions that are well-suited for such tasks. In this paper, we study this through the lens of kernel methods, by considering simple hierarchical kernels with two or three convolution and pooling layers, inspired by convolutional kernel networks. These achieve good empirical performance on standard vision datasets, while providing a precise description of their functional space that yields new insights on their inductive bias. We show that the RKHS consists of additive models of interaction terms between patches, and that its norm encourages spatial similarities between these terms through pooling layers. 
We then provide generalization bounds which illustrate how pooling and patches yield improved sample complexity guarantees when the target function presents such regularities.", "one-sentence_summary": "We study the inductive bias of multi-layer convolutional models through a kernel lens, showing generalization benefits of various architectural choices such as locality, depth, and pooling layers.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "bietti|approximation_and_learning_with_deep_convolutional_models_a_kernel_perspective", "pdf": "/pdf/35eeb8c9531f39eb14e07db8fb296d38b7f1a369.pdf", "supplementary_material": "/attachment/f97f90f40e8281a4aaf207b0702056a29a294971.zip", "code": "", "_bibtex": "@inproceedings{\nbietti2022approximation,\ntitle={Approximation and Learning with Deep Convolutional Models: a Kernel Perspective},\nauthor={Alberto Bietti},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=lrocYB-0ST2}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "vgqS1vkkCbE", "original": "EGWmWbNRAzPC", "number": 4569, "cdate": 1632875761459, "mdate": null, "ddate": null, "tcdate": 1632875761459, "tmdate": 1750551541174, "tddate": null, "forum": "vgqS1vkkCbE", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Value Function Spaces: Skill-Centric State Abstractions for Long-Horizon Reasoning", "authorids": ["~Dhruv_Shah1", "~Peng_Xu9", "~Yao_Lu13", "~Ted_Xiao1", "~Alexander_T_Toshev1", "~Sergey_Levine1", "~brian_ichter1"], "authors": ["Dhruv Shah", "Peng Xu", "Yao Lu", "Ted Xiao", "Alexander T Toshev", "Sergey Levine", "brian ichter"], "keywords": ["hierarchical reinforcement learning", "planning", "representation learning", "robotics"], "abstract": "Reinforcement learning can train policies that effectively perform complex tasks. However for long-horizon tasks, the performance of these methods degrades with horizon, often necessitating reasoning over and chaining lower-level skills. Hierarchical reinforcement learning aims to enable this by providing a bank of low-level skills as action abstractions. Hierarchies can further improve on this by abstracting the space states as well. We posit that a suitable state abstraction should depend on the capabilities of the available lower-level policies. We propose Value Function Spaces: a simple approach that produces such a representation by using the value functions corresponding to each lower-level skill. These value functions capture the affordances of the scene, thus forming a representation that compactly abstracts task relevant information and robustly ignores distractors. 
Empirical evaluations for maze-solving and robotic manipulation tasks demonstrate that our approach improves long-horizon performance and enables better zero-shot generalization than alternative model-free and model-based methods.", "pdf": "/pdf/c49d03d6fc757e37898cc5399159de2e30589146.pdf", "one-sentence_summary": "We introduce value function spaces, a learned representation of state through the values of low-level skills, which capture affordances and ignores distractors to enable long-horizon reasoning and zero-shot generalization.", "supplementary_material": "", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "shah|value_function_spaces_skillcentric_state_abstractions_for_longhorizon_reasoning", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 4 code implementations](https://www.catalyzex.com/paper/value-function-spaces-skill-centric-state/code)", "_bibtex": "@inproceedings{\nshah2022value,\ntitle={Value Function Spaces: Skill-Centric State Abstractions for Long-Horizon Reasoning},\nauthor={Dhruv Shah and Alexander T Toshev and Sergey Levine and brian ichter},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=vgqS1vkkCbE}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 17}} +{"id": "gNp54NxHUPJ", "original": "5e0Zyvu70R", "number": 4544, "cdate": 1632875759918, "mdate": null, "ddate": null, "tcdate": 1632875759918, "tmdate": 1676330446609, "tddate": null, "forum": "gNp54NxHUPJ", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Fast Regression for Structured Inputs", "authorids": ["~Raphael_A_Meyer1", "~Cameron_N_Musco1", "~Christopher_P_Musco1", "~David_Woodruff1", "~Samson_Zhou1"], "authors": ["Raphael A Meyer", "Cameron N Musco", "Christopher P Musco", "David Woodruff", "Samson Zhou"], "keywords": ["regression", "sublinear time algorithm", "structured input"], "abstract": "We study the $\\ell_p$ regression problem, which requires finding $\\mathbf{x}\\in\\mathbb R^{d}$ that minimizes $\\|\\mathbf{A}\\mathbf{x}-\\mathbf{b}\\|_p$ for a matrix $\\mathbf{A}\\in\\mathbb R^{n \\times d}$ and response vector $\\mathbf{b}\\in\\mathbb R^{n}$. There has been recent interest in developing subsampling methods for this problem that can outperform standard techniques when $n$ is very large. However, all known subsampling approaches have run time that depends exponentially on $p$, typically, $d^{\\mathcal{O}(p)}$, which can be prohibitively expensive. \n\nWe improve on this work by showing that for a large class of common \\emph{structured matrices}, such as combinations of low-rank matrices, sparse matrices, and Vandermonde matrices, there are subsampling based methods for $\\ell_p$ regression that depend polynomially on $p$. For example, we give an algorithm for $\\ell_p$ regression on Vandermonde matrices that runs in time $\\mathcal{O}(n\\log^3 n+(dp^2)^{0.5+\\omega}\\cdot\\text{polylog}\\,n)$, where $\\omega$ is the exponent of matrix multiplication. 
The polynomial dependence on $p$ crucially allows our algorithms to extend naturally to efficient algorithms for $\\ell_\\infty$ regression, via approximation of $\\ell_\\infty$ by $\\ell_{\\mathcal{O}(\\log n)}$. Of practical interest, we also develop a new subsampling algorithm for $\\ell_p$ regression for arbitrary matrices, which is simpler than previous approaches for $p \\ge 4$.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "meyer|fast_regression_for_structured_inputs", "pdf": "/pdf/a76864e8c343a5dcb3414cc8caa6fc2fdd2afc19.pdf", "supplementary_material": "/attachment/33c6a51bd06a695c841c65487e83bf29502afc12.zip", "_bibtex": "@inproceedings{\nmeyer2022fast,\ntitle={Fast Regression for Structured Inputs},\nauthor={Raphael A Meyer and Cameron N Musco and Christopher P Musco and David Woodruff and Samson Zhou},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=gNp54NxHUPJ}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 4}} +{"id": "qhC8mr2LEKq", "original": "6DY9XpRajsio", "number": 4543, "cdate": 1632875759849, "mdate": null, "ddate": null, "tcdate": 1632875759849, "tmdate": 1750551541625, "tddate": null, "forum": "qhC8mr2LEKq", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "CrossBeam: Learning to Search in Bottom-Up Program Synthesis", "authorids": ["~Kensen_Shi1", "~Hanjun_Dai1", "~Kevin_Ellis1", "~Charles_Sutton1"], "authors": ["Kensen Shi", "Hanjun Dai", "Kevin Ellis", "Charles Sutton"], "keywords": ["Program Synthesis", "Bottom-Up Search"], "abstract": "Many approaches to program synthesis perform a search within an enormous space of programs to find one that satisfies a given specification. Prior works have used neural models to guide combinatorial search algorithms, but such approaches still explore a huge portion of the search space and quickly become intractable as the size of the desired program increases. To tame the search space blowup, we propose training a neural model to learn a hands-on search policy for bottom-up synthesis, instead of relying on a combinatorial search algorithm. Our approach, called CrossBeam, uses the neural model to choose how to combine previously-explored programs into new programs, taking into account the search history and partial program executions. Motivated by work in structured prediction on learning to search, CrossBeam is trained on-policy using data extracted from its own bottom-up searches on training tasks. We evaluate CrossBeam in two very different domains, string manipulation and logic programming. 
We observe that CrossBeam learns to search efficiently, exploring much smaller portions of the program space compared to the state-of-the-art.\n", "one-sentence_summary": "We propose training a neural model to learn a hands-on search policy for bottom-up program synthesis, in an effort to tame the search space blowup.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "shi|crossbeam_learning_to_search_in_bottomup_program_synthesis", "pdf": "/pdf/d098dde7689c9940303ddd8c11f5f44e8b866692.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/crossbeam-learning-to-search-in-bottom-up/code)", "_bibtex": "@inproceedings{\nshi2022crossbeam,\ntitle={CrossBeam: Learning to Search in Bottom-Up Program Synthesis},\nauthor={Kensen Shi and Hanjun Dai and Kevin Ellis and Charles Sutton},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=qhC8mr2LEKq}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "M6M8BEmd6dq", "original": "WwHWh6FrcuY", "number": 4542, "cdate": 1632875759780, "mdate": null, "ddate": null, "tcdate": 1632875759780, "tmdate": 1676330446723, "tddate": null, "forum": "M6M8BEmd6dq", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "PEARL: Data Synthesis via Private Embeddings and Adversarial Reconstruction Learning", "authorids": ["~Seng_Pei_Liew1", "~Tsubasa_Takahashi1", "~Michihiko_Ueno1"], "authors": ["Seng Pei Liew", "Tsubasa Takahashi", "Michihiko Ueno"], "keywords": ["Differential Privacy", "Generative Model"], "abstract": "We propose a new framework of synthesizing data using deep generative models in a differentially private manner.\nWithin our framework, sensitive data are sanitized with rigorous privacy guarantees in a one-shot fashion, such that training deep generative models is possible without re-using the original data.\nHence, no extra privacy costs or model constraints are incurred, in contrast to popular gradient sanitization approaches, which, among other issues, cause degradation in privacy guarantees as the training iteration increases.\nWe demonstrate a realization of our framework by making use of the characteristic function and an adversarial re-weighting objective, which are of independent interest as well.\nOur proposal has theoretical guarantees of performance, and empirical evaluations on multiple datasets show that our approach outperforms other methods at reasonable levels of privacy.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "liew|pearl_data_synthesis_via_private_embeddings_and_adversarial_reconstruction_learning", "pdf": "/pdf/3efedef6ce8396ae22861cd7154606c25bd31e95.pdf", "data": "", "code": "", "_bibtex": "@inproceedings{\nliew2022pearl,\ntitle={{PEARL}: Data Synthesis via Private Embeddings and Adversarial Reconstruction Learning},\nauthor={Seng Pei Liew and Tsubasa Takahashi and Michihiko Ueno},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=M6M8BEmd6dq}\n}", "venue": "ICLR 2022 Poster", 
"venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 8}} +{"id": "aOX3a9q3RVV", "original": "FoplHL1Z3aGv", "number": 4523, "cdate": 1632875758497, "mdate": null, "ddate": null, "tcdate": 1632875758497, "tmdate": 1676330447840, "tddate": null, "forum": "aOX3a9q3RVV", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Divisive Feature Normalization Improves Image Recognition Performance in AlexNet", "authorids": ["~Michelle_Miller3", "~SueYeon_Chung1", "~Kenneth_D._Miller2"], "authors": ["Michelle Miller", "SueYeon Chung", "Kenneth D. Miller"], "keywords": ["divisive normalization", "AlexNet", "ImageNet", "CIFAR-100", "manifold capacity", "sparsity", "receptive fields", "Batch Normalization", "Group Normalization", "Layer Normalization"], "abstract": "Local divisive normalization provides a phenomenological description of many nonlinear response properties of neurons across visual cortical areas. To gain insight into the utility of this operation, we studied the effects on AlexNet of a local divisive normalization between features, with learned parameters. Developing features were arranged in a line topology, with the influence between features determined by an exponential function of the distance between them. We compared an AlexNet model with no normalization or with canonical normalizations (Batch, Group, Layer) to the same models with divisive normalization added. Divisive normalization always improved performance for models with batch or group or no normalization, generally by 1-2 percentage points, on both the CIFAR-100 and ImageNet databases. To gain insight into mechanisms underlying the improved performance, we examined several aspects of network representations. In the early layers both canonical and divisive normalizations reduced manifold capacities and increased average dimension of the individual categorical manifolds. In later layers the capacity was higher and manifold dimension lower for models roughly in order of their performance improvement. Examining the sparsity of activations across a given layer, divisive normalization layers increased sparsity, while the canonical normalization layers decreased it. Nonetheless, in the final layer, the sparseness of activity increased in the order of no normalization, divisive, com- bined, and canonical. We also investigated how the receptive fields (RFs) in the first convolutional layer (where RFs are most interpretable) change with normalization. Divisive normalization enhanced RF Fourier power at low wavelengths, while divisive+canonical enhanced power at mid (batch, group) or low (layer) wavelengths, compared to canonical alone or no normalization. 
In conclusion, divisive normalization enhances image recognition performance, most strongly when combined with canonical normalization, and in doing so it reduces manifold capacity and sparsity in early layers while increasing them in final layers, and increases low- or mid-wavelength power in the first-layer receptive fields.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "miller|divisive_feature_normalization_improves_image_recognition_performance_in_alexnet", "pdf": "/pdf/452011d69839dd4fa39ba4bec882b24cb5bb2649.pdf", "one-sentence_summary": "DIVISIVE FEATURE NORMALIZATION IMPROVES IMAGE RECOGNITION PERFORMANCE AND IN- CREASES MANIFOLD CAPACITY, SPARSITY, AND LOW-FREQUENCY REPRESENTATION IN DEEP NETS", "_bibtex": "@inproceedings{\nmiller2022divisive,\ntitle={Divisive Feature Normalization Improves Image Recognition Performance in AlexNet},\nauthor={Michelle Miller and SueYeon Chung and Kenneth D. Miller},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=aOX3a9q3RVV}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 24}} +{"id": "bTteFbU99ye", "original": "74JjSWH2fdFx", "number": 4509, "cdate": 1632875757549, "mdate": null, "ddate": null, "tcdate": 1632875757549, "tmdate": 1750551542359, "tddate": null, "forum": "bTteFbU99ye", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Evaluating Distributional Distortion in Neural Language Modeling", "authorids": ["~Benjamin_LeBrun1", "~Alessandro_Sordoni2", "~Timothy_J._O'Donnell1"], "authors": ["Benjamin LeBrun", "Alessandro Sordoni", "Timothy J. O'Donnell"], "keywords": [], "abstract": "A fundamental characteristic of natural language is the high rate at which speakers produce novel expressions. Because of this novelty, a heavy-tail of rare events accounts for a significant amount of the total probability mass of distributions in language (Baayen, 2001). Standard language modeling metrics such as perplexity quantify the performance of language models (LM) in aggregate. As a result, we have relatively little understanding of whether neural LMs accurately estimate the probability of sequences in this heavy-tail of rare events. To address this gap, we develop a controlled evaluation scheme which uses generative models trained on natural data as artificial languages from which we can exactly compute sequence probabilities. Training LMs on generations from these artificial languages, we compare the sequence-level probability estimates given by LMs to the true probabilities in the target language. Our experiments reveal that LSTM and Transformer language models (i) systematically underestimate the probability of sequences drawn from the target language, and (ii) do so more severely for less-probable sequences. Investigating where this probability mass went, (iii) we find that LMs tend to overestimate the probability of ill formed (perturbed) sequences. 
In addition, we find that this underestimation behaviour (iv) is weakened, but not eliminated by greater amounts of training data, and (v) is exacerbated for target distributions with lower entropy.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "lebrun|evaluating_distributional_distortion_in_neural_language_modeling", "pdf": "/pdf/c22ea9d1df97b96c390eb350b4c09eb8e2388128.pdf", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/evaluating-distributional-distortion-in/code)", "_bibtex": "@inproceedings{\nlebrun2022evaluating,\ntitle={Evaluating Distributional Distortion in Neural Language Modeling},\nauthor={Benjamin LeBrun and Alessandro Sordoni and Timothy J. O'Donnell},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=bTteFbU99ye}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "r5qumLiYwf9", "original": "T70QgJawkK6", "number": 4501, "cdate": 1632875757005, "mdate": null, "ddate": null, "tcdate": 1632875757005, "tmdate": 1750551542673, "tddate": null, "forum": "r5qumLiYwf9", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "MaGNET: Uniform Sampling from Deep Generative Network Manifolds Without Retraining", "authorids": ["~Ahmed_Imtiaz_Humayun1", "~Randall_Balestriero1", "~Richard_Baraniuk1"], "authors": ["Ahmed Imtiaz Humayun", "Randall Balestriero", "Richard Baraniuk"], "keywords": ["Deep Generative Networks", "Uniform Sampling", "Fairness", "Data Augmentation"], "abstract": "Deep Generative Networks (DGNs) are extensively employed in Generative Adversarial Networks (GANs), Variational Autoencoders (VAEs), and their variants to approximate the data manifold, and data distribution on that manifold. However, training samples are often obtained based on preferences, costs, or convenience producing artifacts in the empirical data distribution e.g. the large fraction of smiling faces in the CelebA dataset or the large fraction of dark-haired individuals in FFHQ). {\\em These inconsistencies will be reproduced when sampling from the trained DGN, which has far-reaching potential implications for fairness, data augmentation, anomaly detection, domain adaptation, and beyond.} In response, we develop a differential geometry based sampler -coined MaGNET- that, given any trained DGN, produces samples that are uniformly distributed on the learned manifold. We prove theoretically and empirically that our technique produces a uniform distribution on the manifold regardless of the training set distribution. We perform a range of experiments on various datasets and DGNs. 
One of them considers the state-of-the-art StyleGAN2 trained on FFHQ dataset, where uniform sampling via MaGNET increases distribution precision \\& recall by 4.12\\% \\& 3.01\\% and decreases gender bias by 41.2\\%, without requiring labels or retraining.", "one-sentence_summary": "We propose a differential-geometry-based technique to provably sample uniformly from the data manifold of a trained Deep Generative Network without the need for retraining.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "humayun|magnet_uniform_sampling_from_deep_generative_network_manifolds_without_retraining", "pdf": "/pdf/e9c0ccdf7ecc11a5666ac100d75f89816ce7c0f7.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/magnet-uniform-sampling-from-deep-generative/code)", "_bibtex": "@inproceedings{\nhumayun2022magnet,\ntitle={Ma{GNET}: Uniform Sampling from Deep Generative Network Manifolds Without Retraining},\nauthor={Ahmed Imtiaz Humayun and Randall Balestriero and Richard Baraniuk},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=r5qumLiYwf9}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 10}} +{"id": "xnYACQquaGV", "original": "_FwD4VRCrKn", "number": 4495, "cdate": 1632875756593, "mdate": null, "ddate": null, "tcdate": 1632875756593, "tmdate": 1676330449034, "tddate": null, "forum": "xnYACQquaGV", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Neural Contextual Bandits with Deep Representation and Shallow Exploration", "authorids": ["~Pan_Xu1", "~Zheng_Wen1", "~Handong_Zhao3", "~Quanquan_Gu1"], "authors": ["Pan Xu", "Zheng Wen", "Handong Zhao", "Quanquan Gu"], "keywords": ["neural network", "deep representation learning"], "abstract": "We study neural contextual bandits, a general class of contextual bandits, where each context-action pair is associated with a raw feature vector, but the specific reward generating function is unknown. We propose a novel learning algorithm that transforms the raw feature vector using the last hidden layer of a deep ReLU neural network (deep representation learning), and uses an upper confidence bound (UCB) approach to explore in the last linear layer (shallow exploration). We prove that under standard assumptions, our proposed algorithm achieves $\\tilde{O}(\\sqrt{T})$ finite-time regret, where $T$ is the learning time horizon. 
Compared with existing neural contextual bandit algorithms, our approach is computationally much more efficient since it only needs to explore in the last layer of the deep neural network.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "xu|neural_contextual_bandits_with_deep_representation_and_shallow_exploration", "pdf": "/pdf/c6ee94e7fd22670895280aaf06535b6373d428eb.pdf", "one-sentence_summary": "A new neural network based algorithm for contextual bandit problems with theoretical guarantees and empirical advantages.", "supplementary_material": "/attachment/1468959f4251fd26e9ee75f2a76ef524622386c9.zip", "_bibtex": "@inproceedings{\nxu2022neural,\ntitle={Neural Contextual Bandits with Deep Representation and Shallow Exploration},\nauthor={Pan Xu and Zheng Wen and Handong Zhao and Quanquan Gu},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=xnYACQquaGV}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 17}} +{"id": "NoB8YgRuoFU", "original": "587jtafeI0", "number": 4494, "cdate": 1632875756528, "mdate": null, "ddate": null, "tcdate": 1632875756528, "tmdate": 1676330449032, "tddate": null, "forum": "NoB8YgRuoFU", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "PI3NN: Out-of-distribution-aware Prediction Intervals from Three Neural Networks", "authorids": ["~Siyan_Liu1", "~Pei_Zhang6", "~Dan_Lu1", "~Guannan_Zhang1"], "authors": ["Siyan Liu", "Pei Zhang", "Dan Lu", "Guannan Zhang"], "keywords": [], "abstract": "We propose a novel prediction interval (PI) method for uncertainty quantification, which addresses three major issues with the state-of-the-art PI methods. First, existing PI methods require retraining of neural networks (NNs) for every given confidence level and suffer from the crossing issue in calculating multiple PIs. Second, they usually rely on customized loss functions with extra sensitive hyperparameters for which fine tuning is required to achieve a well-calibrated PI. Third, they usually underestimate uncertainties of out-of-distribution (OOD) samples leading to over-confident PIs. Our PI3NN method calculates PIs from linear combinations of three NNs, each of which is independently trained using the standard mean squared error loss. The coefficients of the linear combinations are computed using root-finding algorithms to ensure tight PIs for a given confidence level. We theoretically prove that PI3NN can calculate PIs for a series of confidence levels without retraining NNs and it completely avoids the crossing issue. Additionally, PI3NN does not introduce any unusual hyperparameters resulting in a stable performance. Furthermore, we address OOD identification challenge by introducing an initialization scheme which provides reasonably larger PIs of the OOD samples than those of the in-distribution samples. 
Benchmark and real-world experiments show that our method outperforms several state-of-the-art approaches with respect to predictive uncertainty quality, robustness, and OOD samples identification.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "liu|pi3nn_outofdistributionaware_prediction_intervals_from_three_neural_networks", "pdf": "/pdf/84a3741f26e65df3c7b232779bcfb5dac283d41e.pdf", "supplementary_material": "/attachment/9f1a45eff2b80564d6dd33651f9efd61ac745e40.zip", "_bibtex": "@inproceedings{\nliu2022pinn,\ntitle={{PI}3{NN}: Out-of-distribution-aware Prediction Intervals from Three Neural Networks},\nauthor={Siyan Liu and Pei Zhang and Dan Lu and Guannan Zhang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=NoB8YgRuoFU}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "kj0_45Y4r9i", "original": "JfHu-xAajzT", "number": 4489, "cdate": 1632875756187, "mdate": null, "ddate": null, "tcdate": 1632875756187, "tmdate": 1676330449212, "tddate": null, "forum": "kj0_45Y4r9i", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Discriminative Similarity for Data Clustering", "authorids": ["~Yingzhen_Yang1", "~Ping_Li3"], "authors": ["Yingzhen Yang", "Ping Li"], "keywords": ["Discriminative Similarity", "Rademacher Complexity", "Generalization Bound", "Data Clustering"], "abstract": "Similarity-based clustering methods separate data into clusters according to the pairwise similarity between the data, and the pairwise similarity is crucial for their performance. In this paper, we propose {\\em Clustering by Discriminative Similarity (CDS)}, a novel method which learns discriminative similarity for data clustering. CDS learns an unsupervised similarity-based classifier from each data partition, and searches for the optimal partition of the data by minimizing the generalization error of the learnt classifiers associated with the data partitions. By generalization analysis via Rademacher complexity, the generalization error bound for the unsupervised similarity-based classifier is expressed as the sum of discriminative similarity between the data from different classes. It is proved that the derived discriminative similarity can also be induced by the integrated squared error bound for kernel density classification. 
In order to evaluate the performance of the proposed discriminative similarity, we propose a new clustering method using a kernel as the similarity function, CDS via unsupervised kernel classification (CDSK), with its effectiveness demonstrated by experimental results.", "one-sentence_summary": "We present a novel discriminative similarity for data clustering, and the discriminative similarity is induced by generalization error bound for unsupervised classifier ", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "yang|discriminative_similarity_for_data_clustering", "pdf": "/pdf/b159fb24355dd1bf64f74a757973bbc8cc96d57e.pdf", "data": "", "_bibtex": "@inproceedings{\nyang2022discriminative,\ntitle={Discriminative Similarity for Data Clustering},\nauthor={Yingzhen Yang and Ping Li},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=kj0_45Y4r9i}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "q4tZR1Y-UIs", "original": "TJl_HOwHWJq", "number": 4485, "cdate": 1632875755958, "mdate": null, "ddate": null, "tcdate": 1632875755958, "tmdate": 1676330449325, "tddate": null, "forum": "q4tZR1Y-UIs", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "It Takes Four to Tango: Multiagent Self Play for Automatic Curriculum Generation", "authorids": ["~Yuqing_Du1", "~Pieter_Abbeel2", "~Aditya_Grover1"], "authors": ["Yuqing Du", "Pieter Abbeel", "Aditya Grover"], "keywords": ["curriculum generation", "unsupervised reinforcement learning", "goal conditioned reinforcement learning", "multi agent"], "abstract": "We are interested in training general-purpose reinforcement learning agents that can solve a wide variety of goals. Training such agents efficiently requires automatic generation of a goal curriculum. This is challenging as it requires (a) exploring goals of increasing difficulty, while ensuring that the agent (b) is exposed to a diverse set of goals in a sample efficient manner and (c) does not catastrophically forget previously solved goals. We propose Curriculum Self Play (CuSP), an automated goal generation framework that seeks to satisfy these desiderata by virtue of a multi-player game with 4 agents. We extend the asymmetric curricula learning in PAIRED (Dennis et al., 2020) to a symmetrized game that carefully balances cooperation and competition between two off-policy student learners and two regret-maximizing teachers. CuSP additionally introduces entropic goal coverage and accounts for the non-stationary nature of the students, allowing us to automatically induce a curriculum that balances progressive exploration with anti-catastrophic exploitation. 
We demonstrate that our method succeeds at generating an effective curricula of goals for a range of control tasks, outperforming other methods at zero-shot test-time generalization to novel out-of-distribution goals.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "du|it_takes_four_to_tango_multiagent_self_play_for_automatic_curriculum_generation", "pdf": "/pdf/68a6237e79699c723ce9c9c39537422391df3e2b.pdf", "supplementary_material": "/attachment/047ac30a146793649501c5ef3f9aac75db853146.zip", "_bibtex": "@inproceedings{\ndu2022it,\ntitle={It Takes Four to Tango: Multiagent Self Play for Automatic Curriculum Generation},\nauthor={Yuqing Du and Pieter Abbeel and Aditya Grover},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=q4tZR1Y-UIs}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 26}} +{"id": "HOjLHrlZhmx", "original": "UYlV_r98EVh", "number": 4469, "cdate": 1632875754953, "mdate": null, "ddate": null, "tcdate": 1632875754953, "tmdate": 1750551543364, "tddate": null, "forum": "HOjLHrlZhmx", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "CROP: Certifying Robust Policies for Reinforcement Learning through Functional Smoothing", "authorids": ["~Fan_Wu6", "~Linyi_Li1", "~Zijian_Huang2", "~Yevgeniy_Vorobeychik1", "~Ding_Zhao1", "~Bo_Li19"], "authors": ["Fan Wu", "Linyi Li", "Zijian Huang", "Yevgeniy Vorobeychik", "Ding Zhao", "Bo Li"], "keywords": [], "abstract": "As reinforcement learning (RL) has achieved great success and been even adopted in safety-critical domains such as autonomous vehicles, a range of empirical studies have been conducted to improve its robustness against adversarial attacks. However, how to certify its robustness with theoretical guarantees still remains challenging. In this paper, we present the first unified framework CROP (Certifying Robust Policies for RL) to provide robustness certification on both action and reward levels. In particular, we propose two robustness certification criteria: robustness of per-state actions and lower bound of cumulative rewards. We then develop a local smoothing algorithm for policies derived from Q-functions to guarantee the robustness of actions taken along the trajectory; we also develop a global smoothing algorithm for certifying the lower bound of a finite-horizon cumulative reward, as well as a novel local smoothing algorithm to perform adaptive search in order to obtain tighter reward certification. Empirically, we apply CROP to evaluate several existing empirically robust RL algorithms, including adversarial training and different robust regularization, in four environments (two representative Atari games, Highway, and CartPole). Furthermore, by evaluating these algorithms against adversarial attacks, we demonstrate that our certifications are often tight. 
All experiment results are available at website https://crop-leaderboard.github.io.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wu|crop_certifying_robust_policies_for_reinforcement_learning_through_functional_smoothing", "pdf": "/pdf/b79f87ced196c2a5a13ca10bae3d39a8924b08b8.pdf", "supplementary_material": "/attachment/526a90f67b614ec1870f2265a9abb52a644f13e2.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 4 code implementations](https://www.catalyzex.com/paper/crop-certifying-robust-policies-for/code)", "_bibtex": "@inproceedings{\nwu2022crop,\ntitle={{CROP}: Certifying Robust Policies for Reinforcement Learning through Functional Smoothing},\nauthor={Fan Wu and Linyi Li and Zijian Huang and Yevgeniy Vorobeychik and Ding Zhao and Bo Li},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=HOjLHrlZhmx}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 27}} +{"id": "CCu6RcUMwK0", "original": "OVUzHQaNDS", "number": 4448, "cdate": 1632875753554, "mdate": null, "ddate": null, "tcdate": 1632875753554, "tmdate": 1750551543773, "tddate": null, "forum": "CCu6RcUMwK0", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Neural Link Prediction with Walk Pooling", "authorids": ["~Liming_Pan1", "~Cheng_Shi2", "~Ivan_Dokmanić1"], "authors": ["Liming Pan", "Cheng Shi", "Ivan Dokmanić"], "keywords": ["Graph neural network", "Link prediction", "Random walk", "Graph topology."], "abstract": "Graph neural networks achieve high accuracy in link prediction by jointly leveraging graph topology and node attributes. Topology, however, is represented indirectly; state-of-the-art methods based on subgraph classification label nodes with distance to the target link, so that, although topological information is present, it is tempered by pooling. This makes it challenging to leverage features like loops and motifs associated with network formation mechanisms. We propose a link prediction algorithm based on a new pooling scheme called WalkPool. WalkPool combines the expressivity of topological heuristics with the feature-learning ability of neural networks. It summarizes a putative link by random walk probabilities of adjacent paths. Instead of extracting transition probabilities from the original graph, it computes the transition matrix of a ``predictive'' latent graph by applying attention to learned features; this may be interpreted as feature-sensitive topology fingerprinting. WalkPool can leverage unsupervised node features or be combined with GNNs and trained end-to-end. It outperforms state-of-the-art methods on all common link prediction benchmarks, both homophilic and heterophilic, with and without node attributes. Applying WalkPool to a set of unsupervised GNNs significantly improves prediction accuracy, suggesting that it may be used as a general-purpose graph pooling scheme. 
", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "pan|neural_link_prediction_with_walk_pooling", "pdf": "/pdf/ad031c5e836c55357e2f13cdb18fa502a7eecc80.pdf", "supplementary_material": "/attachment/5a654633d2e6d28cb2b30509b34926fd851a1c48.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/neural-link-prediction-with-walk-pooling/code)", "_bibtex": "@inproceedings{\npan2022neural,\ntitle={Neural Link Prediction with Walk Pooling},\nauthor={Liming Pan and Cheng Shi and Ivan Dokmani{\\'c}},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=CCu6RcUMwK0}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 19}} +{"id": "YeShU5mLfLt", "original": "a160ykT-syy", "number": 4436, "cdate": 1632875752760, "mdate": null, "ddate": null, "tcdate": 1632875752760, "tmdate": 1676330452004, "tddate": null, "forum": "YeShU5mLfLt", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "On the Convergence of Certified Robust Training with Interval Bound Propagation", "authorids": ["~Yihan_Wang2", "~Zhouxing_Shi1", "~Quanquan_Gu1", "~Cho-Jui_Hsieh1"], "authors": ["Yihan Wang", "Zhouxing Shi", "Quanquan Gu", "Cho-Jui Hsieh"], "keywords": ["Certified robustness", "Adversarial robustness", "Convergence"], "abstract": "Interval Bound Propagation (IBP) is so far the base of state-of-the-art methods for training neural networks with certifiable robustness guarantees when potential adversarial perturbations present, while the convergence of IBP training remains unknown in existing literature. In this paper, we present a theoretical analysis on the convergence of IBP training. With an overparameterized assumption, we analyze the convergence of IBP robust training. 
We show that when using IBP training to train a randomly initialized two-layer ReLU neural network with logistic loss, gradient descent can linearly converge to zero robust training error with a high probability if we have sufficiently small perturbation radius and large network width.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wang|on_the_convergence_of_certified_robust_training_with_interval_bound_propagation", "pdf": "/pdf/4e7f7f34a6f11b062e283b3a04324bb373e39067.pdf", "one-sentence_summary": "We present the first theoretical analysis on the convergence of certified robust training with interval bound propagation.", "_bibtex": "@inproceedings{\nwang2022on,\ntitle={On the Convergence of Certified Robust Training with Interval Bound Propagation},\nauthor={Yihan Wang and Zhouxing Shi and Quanquan Gu and Cho-Jui Hsieh},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=YeShU5mLfLt}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "sX3XaHwotOg", "original": "3xkYmrpZMjX", "number": 4429, "cdate": 1632875752368, "mdate": null, "ddate": null, "tcdate": 1632875752368, "tmdate": 1750551544071, "tddate": null, "forum": "sX3XaHwotOg", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Pretraining Text Encoders with Adversarial Mixture of Training Signal Generators", "authorids": ["~Yu_Meng1", "~Chenyan_Xiong1", "~Payal_Bajaj2", "~saurabh_tiwary1", "~Paul_N._Bennett1", "~Jiawei_Han1", "~Xia_Song1"], "authors": ["Yu Meng", "Chenyan Xiong", "Payal Bajaj", "saurabh tiwary", "Paul N. Bennett", "Jiawei Han", "Xia Song"], "keywords": ["Language Model Pretraining"], "abstract": "We present a new framework AMOS that pretrains text encoders with an Adversarial learning curriculum via a Mixture Of Signals from multiple auxiliary generators. Following ELECTRA-style pretraining, the main encoder is trained as a discriminator to detect replaced tokens generated by auxiliary masked language models (MLMs). Different from ELECTRA which trains one MLM as the generator, we jointly train multiple MLMs of different sizes to provide training signals at various levels of difficulty. To push the discriminator to learn better with challenging replaced tokens, we learn mixture weights over the auxiliary MLMs' outputs to maximize the discriminator loss by backpropagating the gradient from the discriminator via Gumbel-Softmax. For better pretraining efficiency, we propose a way to assemble multiple MLMs into one unified auxiliary model. 
AMOS outperforms ELECTRA and recent state-of-the-art pretrained models by about 1 point on the GLUE benchmark for BERT base-sized models.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "meng|pretraining_text_encoders_with_adversarial_mixture_of_training_signal_generators", "pdf": "/pdf/4127a755f1e5ee998e6423f7a8d734f9e88b8cab.pdf", "one-sentence_summary": "We present AMOS, a new method that pretrains text encoders with an Adversarial learning curriculum via a Mixture Of Signals from multiple auxiliary generators.", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/pretraining-text-encoders-with-adversarial/code)", "_bibtex": "@inproceedings{\nmeng2022pretraining,\ntitle={Pretraining Text Encoders with Adversarial Mixture of Training Signal Generators},\nauthor={Yu Meng and Chenyan Xiong and Payal Bajaj and saurabh tiwary and Paul N. Bennett and Jiawei Han and Xia Song},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=sX3XaHwotOg}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 27}} +{"id": "0jP2n0YFmKG", "original": "f6c4B-7AYfd", "number": 4408, "cdate": 1632875750942, "mdate": null, "ddate": null, "tcdate": 1632875750942, "tmdate": 1676330452434, "tddate": null, "forum": "0jP2n0YFmKG", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Towards Training Billion Parameter Graph Neural Networks for Atomic Simulations", "authorids": ["~Anuroop_Sriram1", "~Abhishek_Das1", "~Brandon_M_Wood1", "~Siddharth_Goyal2", "~C._Lawrence_Zitnick2"], "authors": ["Anuroop Sriram", "Abhishek Das", "Brandon M Wood", "Siddharth Goyal", "C. Lawrence Zitnick"], "keywords": ["Graph Neural Networks", "Atomic Simulations", "Computational Chemistry"], "abstract": "Recent progress in Graph Neural Networks (GNNs) for modeling atomic simulations has the potential to revolutionize catalyst discovery, which is a key step in making progress towards the energy breakthroughs needed to combat climate change. However, the GNNs that have proven most effective for this task are memory intensive as they model higher-order interactions in the graphs such as those between triplets or quadruplets of atoms, making it challenging to scale these models. In this paper, we introduce Graph Parallelism, a method to distribute input graphs across multiple GPUs, enabling us to train very large GNNs with hundreds of millions or billions of parameters. We empirically evaluate our method by scaling up the recently proposed DimeNet++ and GemNet models by over an order of magnitude in the number of parameters. 
On the large-scale Open Catalyst 2020 (OC20) dataset, these graph-parallelized models lead to relative improvements of 1) 15% on the force MAE metric on the S2EF task and 2) 21% on the AFbT metric on the IS2RS task, establishing new state-of-the-art results.", "pdf": "/pdf/d00345679f2290baeabb225428516fad14fea79e.pdf", "one-sentence_summary": "We scale GNNs used for modeling atomic simulations by an order of magnitude and obtain large performance improvements on the Open Catalyst 2020 dataset.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "sriram|towards_training_billion_parameter_graph_neural_networks_for_atomic_simulations", "_bibtex": "@inproceedings{\nsriram2022towards,\ntitle={Towards Training Billion Parameter Graph Neural Networks for Atomic Simulations},\nauthor={Anuroop Sriram and Abhishek Das and Brandon M Wood and C. Lawrence Zitnick},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=0jP2n0YFmKG}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "shbAgEsk3qM", "original": "gU5cgt5F8Sa", "number": 4376, "cdate": 1632875748869, "mdate": null, "ddate": null, "tcdate": 1632875748869, "tmdate": 1676330454106, "tddate": null, "forum": "shbAgEsk3qM", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Understanding and Leveraging Overparameterization in Recursive Value Estimation", "authorids": ["~Chenjun_Xiao1", "~Bo_Dai1", "~Jincheng_Mei1", "~Oscar_A_Ramirez1", "~Ramki_Gummadi1", "~Chris_Harris1", "~Dale_Schuurmans1"], "authors": ["Chenjun Xiao", "Bo Dai", "Jincheng Mei", "Oscar A Ramirez", "Ramki Gummadi", "Chris Harris", "Dale Schuurmans"], "keywords": ["Temporal Difference Learning", "Residual Minimization", "Value Estimation", "Overparameterization"], "abstract": "The theory of function approximation in reinforcement learning (RL) typically considers low capacity representations that incur a tradeoff between approximation error, stability and generalization. Current deep architectures, however, operate in an overparameterized regime where approximation error is not necessarily a bottleneck. To better understand the utility of deep models in RL we present an analysis of recursive value estimation using \\emph{overparameterized} linear representations that provides useful, transferable findings. First, we show that classical updates such as temporal difference (TD) learning or fitted-value-iteration (FVI) converge to \\emph{different} fixed points than residual minimization (RM) in the overparameterized linear case. We then develop a unified interpretation of overparameterized linear value estimation as minimizing the Euclidean norm of the weights subject to alternative constraints. A practical consequence is that RM can be modified by a simple alteration of the backup targets to obtain the same fixed points as FVI and TD (when they converge), while universally ensuring stability. Further, we provide an analysis of the generalization error of these methods, demonstrating per iterate bounds on the value prediction error of FVI, and fixed point bounds for TD and RM. 
\nGiven this understanding, we then develop new algorithmic tools for improving recursive value estimation with deep models. \nIn particular, we extract two regularizers that penalize out-of-span top-layer weights and co-linearity in top-layer features respectively. Empirically we find that these regularizers dramatically improve the stability of TD and FVI, while allowing RM to match and even sometimes surpass their generalization performance with assured stability. ", "one-sentence_summary": "We present an analysis of value estimation under overparameterized linear representations, and develop new algorithmic tools for improving recursive value estimation with deep models based on the new findings.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "xiao|understanding_and_leveraging_overparameterization_in_recursive_value_estimation", "pdf": "/pdf/c5131ad5930c1a9f32ede673f284175158a75792.pdf", "_bibtex": "@inproceedings{\nxiao2022understanding,\ntitle={Understanding and Leveraging Overparameterization in Recursive Value Estimation},\nauthor={Chenjun Xiao and Bo Dai and Jincheng Mei and Oscar A Ramirez and Ramki Gummadi and Chris Harris and Dale Schuurmans},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=shbAgEsk3qM}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 30}} +{"id": "dPyRNUlttBv", "original": "72eah3EepFC", "number": 4368, "cdate": 1632875748328, "mdate": null, "ddate": null, "tcdate": 1632875748328, "tmdate": 1676330454113, "tddate": null, "forum": "dPyRNUlttBv", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Optimization and Adaptive Generalization of Three layer Neural Networks", "authorids": ["~Khashayar_Gatmiry1", "~Stefanie_Jegelka3", "~Jonathan_Kelner1"], "authors": ["Khashayar Gatmiry", "Stefanie Jegelka", "Jonathan Kelner"], "keywords": ["deep learning theory", "adaptive kernel", "robust deep learning", "neural tangent kernel", "adaptive generalization", "non-convex optimization"], "abstract": "While there has been substantial recent work studying generalization of neural networks, \nthe ability of deep nets in automating the process of feature extraction still evades a thorough mathematical understanding. \nAs a step toward this goal, we analyze learning and generalization of a three-layer neural network with ReLU activations in a regime that goes beyond the linear approximation of the network, and is hence not captured by the common Neural Tangent Kernel. We show that despite nonconvexity of the empirical loss, a variant of SGD converges in polynomially many iterations to a good solution that generalizes. In particular, our generalization bounds are adaptive: they automatically optimize over a family of kernels that includes the Neural Tangent Kernel, to provide the tightest bound. 
", "one-sentence_summary": "Algorithmically obtaining noise-robust and adaptive generalization bounds for a three layer network model by going beyond the linear approximation of the network", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "gatmiry|optimization_and_adaptive_generalization_of_three_layer_neural_networks", "pdf": "/pdf/086ce10c9607a92d59635b0ac0f1f0bd8c86ae5b.pdf", "supplementary_material": "/attachment/ecbe39edaf5ae30cc427ed0c679a34b32f233847.zip", "_bibtex": "@inproceedings{\ngatmiry2022optimization,\ntitle={Optimization and Adaptive Generalization of Three layer Neural Networks},\nauthor={Khashayar Gatmiry and Stefanie Jegelka and Jonathan Kelner},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=dPyRNUlttBv}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "-TSe5o7STVR", "original": "uguqwDk2vKJA", "number": 4365, "cdate": 1632875748125, "mdate": null, "ddate": null, "tcdate": 1632875748125, "tmdate": 1750551545558, "tddate": null, "forum": "-TSe5o7STVR", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Non-Parallel Text Style Transfer with Self-Parallel Supervision", "authorids": ["~Ruibo_Liu1", "~Chongyang_Gao1", "~Chenyan_Jia1", "~Guangxuan_Xu1", "~Soroush_Vosoughi1"], "authors": ["Ruibo Liu", "Chongyang Gao", "Chenyan Jia", "Guangxuan Xu", "Soroush Vosoughi"], "keywords": ["style transfer", "non-parallel corpus", "imitation learning", "language models", "political stance transfer"], "abstract": "The performance of existing text style transfer models is severely limited by the non-parallel datasets on which the models are trained. In non-parallel datasets, no direct mapping exists between sentences of the source and target style; the style transfer models thus only receive weak supervision of the target sentences during training, which often leads the model to discard too much style-independent information, or utterly fail to transfer the style.\n\nIn this work, we propose LaMer, a novel text style transfer framework based on large-scale language models. LaMer first mines the roughly parallel expressions in the non-parallel datasets with scene graphs, and then employs MLE training, followed by imitation learning refinement, to leverage the intrinsic parallelism within the data. On two benchmark tasks (sentiment & formality transfer) and a newly proposed challenging task (political stance transfer), our model achieves qualitative advances in transfer accuracy, content preservation, and fluency. 
Further empirical and human evaluations demonstrate that our model not only makes training more efficient, but also generates more readable and diverse expressions than previous models.", "one-sentence_summary": "We propose a new text style transfer model for non-parallel corpus with supervision from intrinsic parallelism.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "liu|nonparallel_text_style_transfer_with_selfparallel_supervision", "pdf": "/pdf/7858e341aa92c11991455a43e9a78c35ee4655a2.pdf", "supplementary_material": "/attachment/e2ae99e37d9af5e859e28d2d17d4dd76a85c092d.zip", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/non-parallel-text-style-transfer-with-self/code)", "_bibtex": "@inproceedings{\nliu2022nonparallel,\ntitle={Non-Parallel Text Style Transfer with Self-Parallel Supervision},\nauthor={Ruibo Liu and Chongyang Gao and Chenyan Jia and Guangxuan Xu and Soroush Vosoughi},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=-TSe5o7STVR}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 24}} +{"id": "qhkFX-HLuHV", "original": "l85wba8eBw9", "number": 4364, "cdate": 1632875748055, "mdate": null, "ddate": null, "tcdate": 1632875748055, "tmdate": 1676330454650, "tddate": null, "forum": "qhkFX-HLuHV", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Can an Image Classifier Suffice For Action Recognition?", "authorids": ["~Quanfu_Fan1", "~Chun-Fu_Chen1", "~Rameswar_Panda1"], "authors": ["Quanfu Fan", "Chun-Fu Chen", "Rameswar Panda"], "keywords": ["action recognition", "image classifier", "super image", "vision transformer"], "abstract": "We explore a new perspective on video understanding by casting the video recognition problem as an image recognition task. Our approach rearranges input video frames into super images, which allow for training an image classifier directly to fulfill the task of action recognition, in exactly the same way as image classification. With such a simple idea, we show that transformer-based image classifiers alone can suffice for action recognition. In particular, our approach demonstrates strong and promising performance against SOTA methods on several public datasets including Kinetics400, Moments In Time, Something-Something V2 (SSV2), Jester and Diving48. We also experiment with the prevalent ResNet image classifiers in computer vision to further validate our idea. The results on both Kinetics400 and SSV2 are comparable to some of the best-performed CNN approaches based on spatio-temporal modeling. 
Our source codes and models are available at \\url{https://github.com/IBM/sifar-pytorch}.", "one-sentence_summary": "We propose the idea of super images to re-purpose an image classifer for action recognition.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "fan|can_an_image_classifier_suffice_for_action_recognition", "pdf": "/pdf/30716aa30d9fbd5e0f9a95e4c0e1255607ab8bc4.pdf", "data": "", "_bibtex": "@inproceedings{\nfan2022can,\ntitle={Can an Image Classifier Suffice For Action Recognition?},\nauthor={Quanfu Fan and Chun-Fu Chen and Rameswar Panda},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=qhkFX-HLuHV}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "IK9ap6nxXr2", "original": "iy8-KjIy36P", "number": 4326, "cdate": 1632875745560, "mdate": null, "ddate": null, "tcdate": 1632875745560, "tmdate": 1750551546574, "tddate": null, "forum": "IK9ap6nxXr2", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Interacting Contour Stochastic Gradient Langevin Dynamics", "authorids": ["~Wei_Deng1", "~Siqi_Liang1", "~Botao_Hao1", "~Guang_Lin1", "~Faming_Liang1"], "authors": ["Wei Deng", "Siqi Liang", "Botao Hao", "Guang Lin", "Faming Liang"], "keywords": ["stochastic gradient Langevin dynamics", "MCMC", "importance sampling", "Wang-Landau algorithm", "Parallel MCMC Methods", "stochastic approximation"], "abstract": "We propose an interacting contour stochastic gradient Langevin dynamics (ICSGLD) sampler, an embarrassingly parallel multiple-chain contour stochastic gradient Langevin dynamics (CSGLD) sampler with efficient interactions. We show that ICSGLD can be theoretically more efficient than a single-chain CSGLD with an equivalent computational budget. We also present a novel random-field function, which facilitates the estimation of self-adapting parameters in big data and obtains free mode explorations. Empirically, we compare the proposed algorithm with popular benchmark methods for posterior sampling. 
The numerical results show a great potential of ICSGLD for large-scale uncertainty estimation tasks.", "one-sentence_summary": "We propose an interacting contour stochastic gradient Langevin dynamics sampler and prove it can be theoretically more efficient than a single-chain process with an equivalent computational budget.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "deng|interacting_contour_stochastic_gradient_langevin_dynamics", "pdf": "/pdf/bf454b672f7afe0c72e3a83029c7238309a1b4a0.pdf", "supplementary_material": "/attachment/322c413292673974c1abaa360c081e05e58a9c8a.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/interacting-contour-stochastic-gradient/code)", "_bibtex": "@inproceedings{\ndeng2022interacting,\ntitle={Interacting Contour Stochastic Gradient Langevin Dynamics},\nauthor={Wei Deng and Siqi Liang and Botao Hao and Guang Lin and Faming Liang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=IK9ap6nxXr2}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 21}} +{"id": "MIX3fJkl_1", "original": "IA5pAjTeV2Ac", "number": 4325, "cdate": 1632875745492, "mdate": null, "ddate": null, "tcdate": 1632875745492, "tmdate": 1750551546731, "tddate": null, "forum": "MIX3fJkl_1", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "NeuPL: Neural Population Learning", "authorids": ["~Siqi_Liu1", "~Luke_Marris2", "~Daniel_Hennes1", "~Josh_Merel1", "~Nicolas_Heess1", "~Thore_Graepel1"], "authors": ["Siqi Liu", "Luke Marris", "Daniel Hennes", "Josh Merel", "Nicolas Heess", "Thore Graepel"], "keywords": ["Multi-Agent Learning", "Game Theory", "Population Learning"], "abstract": "Learning in strategy games (e.g. StarCraft, poker) requires the discovery of diverse policies. This is often achieved by iteratively training new policies against existing ones, growing a policy population that is robust to exploit. This iterative approach suffers from two issues in real-world games: a) under finite budget, approximate best-response operators at each iteration needs truncating, resulting in under-trained good-responses populating the population; b) repeated learning of basic skills at each iteration is wasteful and becomes intractable in the presence of increasingly strong opponents. In this work, we propose Neural Population Learning (NeuPL) as a solution to both issues. NeuPL offers convergence guarantees to a population of best-responses under mild assumptions. By representing a population of policies within a single conditional model, NeuPL enables transfer learning across policies. Empirically, we show the generality, improved performance and efficiency of NeuPL across several test domains. 
Most interestingly, we show that novel strategies become more accessible, not less, as the neural population expands.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "liu|neupl_neural_population_learning", "pdf": "/pdf/eeeb391c4885267d9c80ba3a8ea3dfd9e9ea8832.pdf", "one-sentence_summary": "We propose NeuPL, a general and efficient population learning framework that learns and represents diverse policies in symmetric zero-sum games within a single conditional network via self-play.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/neupl-neural-population-learning/code)", "_bibtex": "@inproceedings{\nliu2022neupl,\ntitle={Neu{PL}: Neural Population Learning},\nauthor={Siqi Liu and Luke Marris and Daniel Hennes and Josh Merel and Nicolas Heess and Thore Graepel},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=MIX3fJkl_1}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "hniLRD_XCA", "original": "Yhs6u0uPWets", "number": 4319, "cdate": 1632875745084, "mdate": null, "ddate": null, "tcdate": 1632875745084, "tmdate": 1676330456993, "tddate": null, "forum": "hniLRD_XCA", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "DeSKO: Stability-Assured Robust Control with a Deep Stochastic Koopman Operator", "authorids": ["~Minghao_Han2", "~Jacob_Euler-Rolle1", "~Robert_K._Katzschmann1"], "authors": ["Minghao Han", "Jacob Euler-Rolle", "Robert K. Katzschmann"], "keywords": ["Koopman Operator", "Robust Control", "Robotics", "Model Predictive Control", "Soft Robotics"], "abstract": "The Koopman operator theory linearly describes nonlinear dynamical systems in a high-dimensional functional space and it allows to apply linear control methods to highly nonlinear systems. However, the Koopman operator does not account for any uncertainty in dynamical systems, causing it to perform poorly in real-world applications.\nTherefore, we propose a deep stochastic Koopman operator (DeSKO) model in a robust learning control framework to guarantee stability of nonlinear stochastic systems. The DeSKO model captures a dynamical system's uncertainty by inferring a distribution of observables. We use the inferred distribution to design a robust, stabilizing closed-loop controller for a dynamical system. Modeling and control experiments on several advanced control benchmarks show that our framework is more robust and scalable than state-of-the-art deep Koopman operators and reinforcement learning methods. Tested control benchmarks include a soft robotic arm, a legged robot, and a biological gene regulatory network. We also demonstrate that this robust control method resists previously unseen uncertainties, such as external disturbances, with a magnitude of up to five times the maximum control input. 
Our approach opens up new possibilities in learning control for high-dimensional nonlinear systems while robustly managing internal or external uncertainty.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "han|desko_stabilityassured_robust_control_with_a_deep_stochastic_koopman_operator", "pdf": "/pdf/862602026e43c103de39be4295ff8f7288f3acf2.pdf", "one-sentence_summary": "A robust learning control framework with guarantee stability based on deep stochastic Koopman operator models", "supplementary_material": "/attachment/8fd81957080cab01cd427ae09252f3266e67e4e5.zip", "_bibtex": "@inproceedings{\nhan2022desko,\ntitle={De{SKO}: Stability-Assured Robust Control with a Deep Stochastic Koopman Operator},\nauthor={Minghao Han and Jacob Euler-Rolle and Robert K. Katzschmann},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=hniLRD_XCA}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 10}} +{"id": "oiZJwC_fyS", "original": "5jUw6GFnGOfr", "number": 4300, "cdate": 1632875743879, "mdate": null, "ddate": null, "tcdate": 1632875743879, "tmdate": 1676330457959, "tddate": null, "forum": "oiZJwC_fyS", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Neural Network Approximation based on Hausdorff distance of Tropical Zonotopes", "authorids": ["~Panagiotis_Misiakos1", "~Georgios_Smyrnis1", "~George_Retsinas2", "~Petros_Maragos1"], "authors": ["Panagiotis Misiakos", "Georgios Smyrnis", "George Retsinas", "Petros Maragos"], "keywords": ["Tropical Geometry", "Zonotopes", "Hausdorff Approximation", "Neural Network Compression"], "abstract": "In this work we theoretically contribute to neural network approximation by providing a novel tropical geometrical viewpoint to structured neural network compression. In particular, we show that the approximation error between two neural networks with ReLU activations and one hidden layer depends on the Hausdorff distance of the tropical zonotopes of the networks. This theorem comes as a first step towards a purely geometrical interpretation of neural network approximation. Based on this theoretical contribution, we propose geometrical methods that employ the K-means algorithm to compress the fully connected parts of ReLU activated deep neural networks. We analyze the error bounds of our algorithms theoretically based on our approximation theorem and evaluate them empirically on neural network compression. Our experiments follow a proof-of-concept strategy and indicate that our geometrical tools achieve improved performance over relevant tropical geometry techniques and can be competitive against non-tropical methods. 
", "pdf": "/pdf/e09efd74b974abec052126ca4cbb787b04fd3265.pdf", "supplementary_material": "/attachment/e4ebfc4ae844913fb8a8941da483a5e99aebc3cb.zip", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "misiakos|neural_network_approximation_based_on_hausdorff_distance_of_tropical_zonotopes", "data": "", "_bibtex": "@inproceedings{\nmisiakos2022neural,\ntitle={Neural Network Approximation based on Hausdorff distance of Tropical Zonotopes},\nauthor={Panagiotis Misiakos and Georgios Smyrnis and George Retsinas and Petros Maragos},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=oiZJwC_fyS}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 10}} +{"id": "hqkhcFHOeKD", "original": "ls7TOhql7Q0M", "number": 4295, "cdate": 1632875743549, "mdate": null, "ddate": null, "tcdate": 1632875743549, "tmdate": 1750551547452, "tddate": null, "forum": "hqkhcFHOeKD", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Learning Towards The Largest Margins", "authorids": ["~Xiong_Zhou3", "~Xianming_Liu5", "~Deming_Zhai2", "~Junjun_Jiang2", "~Xin_Gao1", "~Xiangyang_Ji1"], "authors": ["Xiong Zhou", "Xianming Liu", "Deming Zhai", "Junjun Jiang", "Xin Gao", "Xiangyang Ji"], "keywords": ["loss function design", "margin-based loss", "classification"], "abstract": "One of the main challenges for feature representation in deep learning-based classification is the design of appropriate loss functions that exhibit strong discriminative power. The classical softmax loss does not explicitly encourage discriminative learning of features. A popular direction of research is to incorporate margins in well-established losses in order to enforce extra intra-class compactness and inter-class separability, which, however, were developed through heuristic means, as opposed to rigorous mathematical principles. In this work, we attempt to address this limitation by formulating the principled optimization objective as learning towards the largest margins. Specifically, we firstly propose to employ the class margin as the measure of inter-class separability, and the sample margin as the measure of intra-class compactness. Accordingly, to encourage discriminative representation of features, the loss function should promote the largest possible margins for both classes and samples. Furthermore, we derive a generalized margin softmax loss to draw general conclusions for the existing margin-based losses. Not only does this principled framework offer new perspectives to understand and interpret existing margin-based losses, but it also provides new insights that can guide the design of new tools, including \\textit{sample margin regularization} and \\textit{largest margin softmax loss} for class balanced cases, and \\textit{zero centroid regularization} for class imbalanced cases. 
Experimental results demonstrate the effectiveness of our strategy for multiple tasks including visual classification, imbalanced classification, person re-identification, and face verification.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "zhou|learning_towards_the_largest_margins", "pdf": "/pdf/05f12453b1762c08d54507567f592f91d86425be.pdf", "supplementary_material": "/attachment/27d3497792e0f4eff3040757232400ab271583da.zip", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/learning-towards-the-largest-margins/code)", "_bibtex": "@inproceedings{\nzhou2022learning,\ntitle={Learning Towards The Largest Margins},\nauthor={Xiong Zhou and Xianming Liu and Deming Zhai and Junjun Jiang and Xin Gao and Xiangyang Ji},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=hqkhcFHOeKD}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "28ib9tf6zhr", "original": "aIwgJOIOCgy", "number": 4289, "cdate": 1632875743141, "mdate": null, "ddate": null, "tcdate": 1632875743141, "tmdate": 1750551547797, "tddate": null, "forum": "28ib9tf6zhr", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Patch-Fool: Are Vision Transformers Always Robust Against Adversarial Perturbations?", "authorids": ["~Yonggan_Fu1", "sz74@rice.edu", "sw99@rice.edu", "~Cheng_Wan2", "~Yingyan_Lin1"], "authors": ["Yonggan Fu", "Shunyao Zhang", "Shang Wu", "Cheng Wan", "Yingyan Lin"], "keywords": ["Vision transformer", "adversarial examples", "robustness"], "abstract": "Vision transformers (ViTs) have recently set off a new wave in neural architecture design thanks to their record-breaking performance in various vision tasks. In parallel, to fulfill the goal of deploying ViTs into real-world vision applications, their robustness against potential malicious attacks has gained increasing attention. In particular, recent works show that ViTs are more robust against adversarial attacks as compared with convolutional neural networks (CNNs), and conjecture that this is because ViTs focus more on capturing global interactions among different input/feature patches, leading to their improved robustness to local perturbations imposed by adversarial attacks. In this work, we ask an intriguing question: \"Under what kinds of perturbations do ViTs become more vulnerable learners compared to CNNs?\" Driven by this question, we first conduct a comprehensive experiment regarding the robustness of both ViTs and CNNs under various existing adversarial attacks to understand the underlying reason favoring their robustness. Based on the drawn insights, we then propose a dedicated attack framework, dubbed Patch-Fool, that fools the self-attention mechanism by attacking its basic component (i.e., a single patch) with a series of attention-aware optimization techniques. Interestingly, our Patch-Fool framework shows for the first time that ViTs are not necessarily more robust than CNNs against adversarial perturbations. 
In particular, we find that ViTs are more vulnerable learners compared with CNNs against our Patch-Fool attack which is consistent across extensive experiments, and the observations from Sparse/Mild Patch-Fool, two variants of Patch-Fool, indicate an intriguing insight that the perturbation density and strength on each patch seem to be the key factors that influence the robustness ranking between ViTs and CNNs. It can be expected that our Patch-Fool framework will shed light on both future architecture designs and training schemes for robustifying ViTs towards their real-world deployment. Our codes are available at https://github.com/RICE-EIC/Patch-Fool.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "fu|patchfool_are_vision_transformers_always_robust_against_adversarial_perturbations", "pdf": "/pdf/4c7b8d2f80c4ea1bfe11754da2e7c69fc5183754.pdf", "one-sentence_summary": "We propose the Patch-Fool attack to unveil a vulnerability perspective of ViTs.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/patch-fool-are-vision-transformers-always/code)", "_bibtex": "@inproceedings{\nfu2022patchfool,\ntitle={Patch-Fool: Are Vision Transformers Always Robust Against Adversarial Perturbations?},\nauthor={Yonggan Fu and Shunyao Zhang and Shang Wu and Cheng Wan and Yingyan Lin},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=28ib9tf6zhr}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "Q5uh1Nvv5dm", "original": "EGeYxMp1aJs", "number": 4287, "cdate": 1632875743011, "mdate": null, "ddate": null, "tcdate": 1632875743011, "tmdate": 1750551548116, "tddate": null, "forum": "Q5uh1Nvv5dm", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "AdaMatch: A Unified Approach to Semi-Supervised Learning and Domain Adaptation", "authorids": ["~David_Berthelot1", "~Rebecca_Roelofs1", "~Kihyuk_Sohn1", "~Nicholas_Carlini1", "~Alexey_Kurakin1"], "authors": ["David Berthelot", "Rebecca Roelofs", "Kihyuk Sohn", "Nicholas Carlini", "Alexey Kurakin"], "keywords": ["unsupervised domain adaptation", "semi-supervised learning", "semi-supervised domain adaptation"], "abstract": "We extend semi-supervised learning to the problem of domain adaptation to learn significantly higher-accuracy models that train on one data distribution and test on a different one. With the goal of generality, we introduce AdaMatch, a unified solution for unsupervised domain adaptation (UDA), semi-supervised learning (SSL), and semi-supervised domain adaptation (SSDA). In an extensive experimental study, we compare its behavior with respective state-of-the-art techniques from SSL, SSDA, and UDA and find that AdaMatch either matches or significantly exceeds the state-of-the-art in each case using the same hyper-parameters regardless of the dataset or task. For example, AdaMatch nearly doubles the accuracy compared to that of the prior state-of-the-art on the UDA task for DomainNet and even exceeds the accuracy of the prior state-of-the-art obtained with pre-training by 6.4% when AdaMatch is trained completely from scratch. 
Furthermore, by providing AdaMatch with just one labeled example per class from the target domain (i.e., the SSDA setting), we increase the target accuracy by an additional 6.1%, and with 5 labeled examples, by 13.6%.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "berthelot|adamatch_a_unified_approach_to_semisupervised_learning_and_domain_adaptation", "pdf": "/pdf/8dd30c7eff2e4f152d2d24368c232baec4e5e974.pdf", "one-sentence_summary": "We introduce AdaMatch, a unified solution that achieves state-of-the-art results for unsupervised domain adaptation (UDA), semi-supervised learning (SSL), and semi-supervised domain adaptation (SSDA).", "supplementary_material": "", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/adamatch-a-unified-approach-to-semi/code)", "_bibtex": "@inproceedings{\nberthelot2022adamatch,\ntitle={AdaMatch: A Unified Approach to Semi-Supervised Learning and Domain Adaptation},\nauthor={David Berthelot and Rebecca Roelofs and Kihyuk Sohn and Nicholas Carlini and Alexey Kurakin},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Q5uh1Nvv5dm}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "l_amHf1oaK", "original": "KzIWzveXoFk", "number": 4286, "cdate": 1632875742943, "mdate": null, "ddate": null, "tcdate": 1632875742943, "tmdate": 1750551548156, "tddate": null, "forum": "l_amHf1oaK", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Complete Verification via Multi-Neuron Relaxation Guided Branch-and-Bound", "authorids": ["~Claudio_Ferrari2", "~Mark_Niklas_Mueller2", "~Nikola_Jovanović1", "~Martin_Vechev1"], "authors": ["Claudio Ferrari", "Mark Niklas Mueller", "Nikola Jovanović", "Martin Vechev"], "keywords": ["Certified Robustness", "Branch-and-Bound", "Convex Relaxation"], "abstract": "State-of-the-art neural network verifiers are fundamentally based on one of two paradigms: either encoding the whole verification problem via tight multi-neuron convex relaxations or applying a Branch-and-Bound (BaB) procedure leveraging imprecise but fast bounding methods on a large number of easier subproblems. The former can capture complex multi-neuron dependencies but sacrifices completeness due to the inherent limitations of convex relaxations. The latter enables complete verification but becomes increasingly ineffective on larger and more challenging networks. In this work, we present a novel complete verifier which combines the strengths of both paradigms: it leverages multi-neuron relaxations to drastically reduce the number of subproblems generated during the BaB process and an efficient GPU-based dual optimizer to solve the remaining ones. An extensive evaluation demonstrates that our verifier achieves a new state-of-the-art on both established benchmarks as well as networks with significantly higher accuracy than previously considered. 
The latter result (up to 28% certification gains) indicates meaningful progress towards creating verifiers that can handle practically relevant networks.", "one-sentence_summary": "We obtain a state-of-the-art GPU-based neural network verifier by leveraging tight multi-neuron constraints in a Branch-and-Bound setting.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ferrari|complete_verification_via_multineuron_relaxation_guided_branchandbound", "pdf": "/pdf/fcc20218f5754386cf64f4156a1f41039038b5da.pdf", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/complete-verification-via-multi-neuron/code)", "_bibtex": "@inproceedings{\nferrari2022complete,\ntitle={Complete Verification via Multi-Neuron Relaxation Guided Branch-and-Bound},\nauthor={Claudio Ferrari and Mark Niklas Mueller and Nikola Jovanovi{\\'c} and Martin Vechev},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=l_amHf1oaK}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 21}} +{"id": "VFBjuF8HEp", "original": "e_4Ywe5oI3B", "number": 4276, "cdate": 1632875742271, "mdate": null, "ddate": null, "tcdate": 1632875742271, "tmdate": 1750551548480, "tddate": null, "forum": "VFBjuF8HEp", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Learning Fast Samplers for Diffusion Models by Differentiating Through Sample Quality", "authorids": ["~Daniel_Watson1", "~William_Chan1", "~Jonathan_Ho1", "~Mohammad_Norouzi1"], "authors": ["Daniel Watson", "William Chan", "Jonathan Ho", "Mohammad Norouzi"], "keywords": [], "abstract": "Diffusion models have emerged as an expressive family of generative models rivaling GANs in sample quality and autoregressive models in likelihood scores. Standard diffusion models typically require hundreds of forward passes through the model to generate a single high-fidelity sample. We introduce Differentiable Diffusion Sampler Search (DDSS): a method that optimizes fast samplers for any pre-trained diffusion model by differentiating through sample quality scores. We also present Generalized Gaussian Diffusion Models (GGDM), a family of flexible non-Markovian samplers for diffusion models. We show that optimizing the degrees of freedom of GGDM samplers by maximizing sample quality scores via gradient descent leads to improved sample quality. Our optimization procedure backpropagates through the sampling process using the reparametrization trick and gradient rematerialization. DDSS achieves strong results on unconditional image generation across various datasets (e.g., FID scores on LSUN church 128x128 of 11.6 with only 10 inference steps, and 4.82 with 20 steps, compared to 51.1 and 14.9 with strongest DDPM/DDIM baselines). 
Our method is compatible with any pre-trained diffusion model without fine-tuning or re-training required.", "one-sentence_summary": "We propose a method to discover fast, high-fidelity samplers for diffusion probabilistic models.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "watson|learning_fast_samplers_for_diffusion_models_by_differentiating_through_sample_quality", "pdf": "/pdf/56f0145dd15f32bd53f6dba7efde74914a88f663.pdf", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/learning-fast-samplers-for-diffusion-models/code)", "_bibtex": "@inproceedings{\nwatson2022learning,\ntitle={Learning Fast Samplers for Diffusion Models by Differentiating Through Sample Quality},\nauthor={Daniel Watson and William Chan and Jonathan Ho and Mohammad Norouzi},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=VFBjuF8HEp}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "lzupY5zjaU9", "original": "ueXHprnmO6S", "number": 4274, "cdate": 1632875742202, "mdate": null, "ddate": null, "tcdate": 1632875742202, "tmdate": 1750551548517, "tddate": null, "forum": "lzupY5zjaU9", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Distribution Compression in Near-Linear Time", "authorids": ["~Abhishek_Shetty1", "~Raaz_Dwivedi1", "~Lester_Mackey1"], "authors": ["Abhishek Shetty", "Raaz Dwivedi", "Lester Mackey"], "keywords": ["Distribution compression", "linear time", "thinning", "i.i.d. sampling", "Markov chain Monte Carlo", "maximum mean discrepancy", "reproducing kernel Hilbert space"], "abstract": "In distribution compression, one aims to accurately summarize a probability distribution $\\mathbb{P}$ using a small number of representative points. Near-optimal thinning procedures achieve this goal by sampling $n$ points from a Markov chain and identifying $\\sqrt{n}$ points with $\\widetilde{\\mathcal{O}}(1/\\sqrt{n})$ discrepancy to $\\mathbb{P}$. Unfortunately, these algorithms suffer from quadratic or super-quadratic runtime in the sample size $n$. To address this deficiency, we introduce Compress++, a simple meta-procedure for speeding up any thinning algorithm while suffering at most a factor of $4$ in error. When combined with the quadratic-time kernel halving and kernel thinning algorithms of Dwivedi and Mackey (2021), Compress++ delivers $\\sqrt{n}$ points with $\\mathcal{O}(\\sqrt{\\log n/n})$ integration error and better-than-Monte-Carlo maximum mean discrepancy in $\\mathcal{O}(n \\log^3 n)$ time and $\\mathcal{O}( \\sqrt{n} \\log^2 n )$ space. Moreover, Compress++ enjoys the same near-linear runtime given any quadratic-time input and reduces the runtime of super-quadratic algorithms by a square-root factor. 
In our benchmarks with high-dimensional Monte Carlo samples and Markov chains targeting challenging differential equation posteriors, Compress++ matches or nearly matches the accuracy of its input algorithm in orders of magnitude less time.", "pdf": "/pdf/484f68f97f561be1f3272522336a9a0b1fa84bbc.pdf", "one-sentence_summary": "We introduce a simple algorithm for compressing an $n$-point summary of a probability distribution into a $\\sqrt{n}$-point summary of comparable quality in $O(n \\log^2 n)$ time.", "supplementary_material": "/attachment/d8d1db0c5d670ef21d0b2c6f8f991f806c77a127.zip", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "shetty|distribution_compression_in_nearlinear_time", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/distribution-compression-in-near-linear-time/code)", "_bibtex": "@inproceedings{\nshetty2022distribution,\ntitle={Distribution Compression in Near-Linear Time},\nauthor={Abhishek Shetty and Raaz Dwivedi and Lester Mackey},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=lzupY5zjaU9}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "nnU3IUMJmN", "original": "Hk90kJpC8D1", "number": 4271, "cdate": 1632875742002, "mdate": null, "ddate": null, "tcdate": 1632875742002, "tmdate": 1750551548853, "tddate": null, "forum": "nnU3IUMJmN", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Capturing Structural Locality in Non-parametric Language Models", "authorids": ["~Frank_F._Xu1", "~Junxian_He1", "~Graham_Neubig1", "~Vincent_Josua_Hellendoorn1"], "authors": ["Frank F. Xu", "Junxian He", "Graham Neubig", "Vincent Josua Hellendoorn"], "keywords": [], "abstract": "Structural locality is a ubiquitous feature of real-world datasets, wherein data points are organized into local hierarchies. Some examples include topical clusters in text or project hierarchies in source code repositories. In this paper, we explore utilizing this structural locality within non-parametric language models, which generate sequences that reference retrieved examples from an external source. We propose a simple yet effective approach for adding locality information into such models by adding learned parameters that improve the likelihood of retrieving examples from local neighborhoods. Experiments on two different domains, Java source code and Wikipedia text, demonstrate that locality features improve model efficacy over models without access to these features, with interesting differences. 
We also perform an analysis of how and where locality features contribute to improving performance and why the traditionally used contextual similarity metrics alone are not enough to grasp the locality structure.\n", "one-sentence_summary": " We propose, study the effect of, and incorporate structural locality in non-parametric language models.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "xu|capturing_structural_locality_in_nonparametric_language_models", "pdf": "/pdf/05677eb0d7fca88dd7c4c6cbefa73f6ae430ad68.pdf", "supplementary_material": "/attachment/82559872eef8dd03d176c8d5ab48392be1b4929d.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/capturing-structural-locality-in-non/code)", "_bibtex": "@inproceedings{\nxu2022capturing,\ntitle={Capturing Structural Locality in Non-parametric Language Models},\nauthor={Frank F. Xu and Junxian He and Graham Neubig and Vincent Josua Hellendoorn},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=nnU3IUMJmN}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 26}} +{"id": "9Nk6AJkVYB", "original": "Dl0Z1JMP1P", "number": 4269, "cdate": 1632875741868, "mdate": null, "ddate": null, "tcdate": 1632875741868, "tmdate": 1676330459784, "tddate": null, "forum": "9Nk6AJkVYB", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Audio Lottery: Speech Recognition Made Ultra-Lightweight, Noise-Robust, and Transferable", "authorids": ["~Shaojin_Ding1", "~Tianlong_Chen1", "~Zhangyang_Wang1"], "authors": ["Shaojin Ding", "Tianlong Chen", "Zhangyang Wang"], "keywords": ["Speech Recognition", "Lottery Ticket Hypothesis"], "abstract": "Lightweight speech recognition models have seen explosive demands owing to a growing amount of speech-interactive features on mobile devices. Since designing such systems from scratch is non-trivial, practitioners typically choose to compress large (pre-trained) speech models. Recently, lottery ticket hypothesis reveals the existence of highly sparse subnetworks that can be trained in isolation without sacrificing the performance of the full models. In this paper, we investigate the tantalizing possibility of using lottery ticket hypothesis to discover lightweight speech recognition models, that are (1) robust to various noise existing in speech; (2) transferable to fit the open-world personalization; and 3) compatible with structured sparsity. We conducted extensive experiments on CNN-LSTM, RNN-Transducer, and Transformer models, and verified the existence of highly sparse winning tickets that can match the full model performance across those backbones. We obtained winning tickets that have less than 20% of full model weights on all backbones, while the most lightweight one only keeps 4.4% weights. Those winning tickets generalize to structured sparsity with no performance loss, and transfer exceptionally from large source datasets to various target datasets. 
Perhaps most surprisingly, when the training utterances have high background noises, the winning tickets even substantially outperform the full models, showing the extra bonus of noise robustness by inducing sparsity. Codes are available at https://github.com/VITA-Group/Audio-Lottery.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ding|audio_lottery_speech_recognition_made_ultralightweight_noiserobust_and_transferable", "pdf": "/pdf/3d42ff881f8ec8954935d0f8bbcb2a21d71106ea.pdf", "one-sentence_summary": "We for the first time investigate three unique properties that were rarely studied in previous LTH research but are key to user-interactive ASR devices, bringing new insights to both LTH theory and lightweight ASR research.", "data": "", "_bibtex": "@inproceedings{\nding2022audio,\ntitle={Audio Lottery: Speech Recognition Made Ultra-Lightweight, Noise-Robust, and Transferable},\nauthor={Shaojin Ding and Tianlong Chen and Zhangyang Wang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=9Nk6AJkVYB}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 31}} +{"id": "swrMQttr6wN", "original": "7ccqrJXNdVN", "number": 4251, "cdate": 1632875740719, "mdate": null, "ddate": null, "tcdate": 1632875740719, "tmdate": 1750551549086, "tddate": null, "forum": "swrMQttr6wN", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Learning to Map for Active Semantic Goal Navigation", "authorids": ["~Georgios_Georgakis1", "~Bernadette_Bucher1", "~Karl_Schmeckpeper1", "~Siddharth_Singh5", "~Kostas_Daniilidis1"], "authors": ["Georgios Georgakis", "Bernadette Bucher", "Karl Schmeckpeper", "Siddharth Singh", "Kostas Daniilidis"], "keywords": ["visual navigation", "semantic map", "uncertainty estimation"], "abstract": "We consider the problem of object goal navigation in unseen environments. Solving this problem requires learning of contextual semantic priors, a challenging endeavour given the spatial and semantic variability of indoor environments. Current methods learn to implicitly encode these priors through goal-oriented navigation policy functions operating on spatial representations that are limited to the agent's observable areas. In this work, we propose a novel framework that actively learns to generate semantic maps outside the field of view of the agent and leverages the uncertainty over the semantic classes in the unobserved areas to decide on long term goals. We demonstrate that through this spatial prediction strategy, we are able to learn semantic priors in scenes that can be leveraged in unknown environments. Additionally, we show how different objectives can be defined by balancing exploration with exploitation during searching for semantic targets. 
Our method is validated in the visually realistic environments of the Matterport3D dataset and show improved results on object goal navigation over competitive baselines.", "one-sentence_summary": "A framework for object goal navigation that actively learns to predict semantic maps and choose long-term goals based on uncertainty measures.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "georgakis|learning_to_map_for_active_semantic_goal_navigation", "pdf": "/pdf/8097afd8a3e6d7c824f59390ca5a9cee0530bbd1.pdf", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/learning-to-map-for-active-semantic-goal/code)", "_bibtex": "@inproceedings{\ngeorgakis2022learning,\ntitle={Learning to Map for Active Semantic Goal Navigation},\nauthor={Georgios Georgakis and Bernadette Bucher and Karl Schmeckpeper and Siddharth Singh and Kostas Daniilidis},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=swrMQttr6wN}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 15}} +{"id": "1W0z96MFEoH", "original": "EKohCRblerU", "number": 4249, "cdate": 1632875740580, "mdate": null, "ddate": null, "tcdate": 1632875740580, "tmdate": 1750551549208, "tddate": null, "forum": "1W0z96MFEoH", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Benchmarking the Spectrum of Agent Capabilities", "authorids": ["~Danijar_Hafner1"], "authors": ["Danijar Hafner"], "keywords": ["Evaluation", "Reinforcement Learning", "Environment", "Benchmark", "Unsupervised Reinforcement Learning", "Exploration"], "abstract": "Evaluating the general abilities of intelligent agents requires complex simulation environments. Existing benchmarks typically evaluate only one narrow task per environment, requiring researchers to perform expensive training runs on many different environments. We introduce Crafter, an open world survival game with visual inputs that evaluates a wide range of general abilities within a single environment. Agents either learn from the provided reward signal or through intrinsic objectives and are evaluated by semantically meaningful achievements that can be unlocked during each episode, such as discovering resources and crafting tools. Consistently unlocking all achievements requires strong generalization, deep exploration, and long-term reasoning. We experimentally verify that Crafter is of appropriate difficulty to drive future research and provide baselines scores of reward agents and unsupervised agents. Furthermore, we observe sophisticated behaviors emerging from maximizing the reward signal, such as building tunnel systems, bridges, houses, and plantations. 
We hope that Crafter will accelerate research progress by quickly evaluating a wide spectrum of abilities.", "pdf": "/pdf/116a18888b3fb460e882ec2b844128223e3b17ca.pdf", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "hafner|benchmarking_the_spectrum_of_agent_capabilities", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/benchmarking-the-spectrum-of-agent/code)", "_bibtex": "@inproceedings{\nhafner2022benchmarking,\ntitle={Benchmarking the Spectrum of Agent Capabilities},\nauthor={Danijar Hafner},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=1W0z96MFEoH}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 29}} +{"id": "vqGi8Kp0wM", "original": "dzOb3ecLUjA", "number": 4248, "cdate": 1632875740513, "mdate": null, "ddate": null, "tcdate": 1632875740513, "tmdate": 1750551549273, "tddate": null, "forum": "vqGi8Kp0wM", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Mind the Gap: Domain Gap Control for Single Shot Domain Adaptation for Generative Adversarial Networks", "authorids": ["~Peihao_Zhu1", "~Rameen_Abdal1", "~John_Femiani1", "~Peter_Wonka1"], "authors": ["Peihao Zhu", "Rameen Abdal", "John Femiani", "Peter Wonka"], "keywords": ["GAN", "StyleGAN", "Clip", "Domain Adaptation", "Style Transfer", "Single Shot"], "abstract": "We present a new method for one shot domain adaptation. The input to our method is trained GAN that can produce images in domain A and a single reference image I_B from domain B. The proposed algorithm can translate any output of the trained GAN from domain A to domain B. There are two main advantages of our method compared to the current state of the art: First, our solution achieves higher visual quality, e.g. by noticeably reducing overfitting. Second, our solution allows for more degrees of freedom to control the domain gap, i.e. what aspects of image I_B are used to define the domain B. Technically, we realize the new method by building on a pre-trained StyleGAN generator as GAN and a pre-trained CLIP model for representing the domain gap. We propose several new regularizers for controlling the domain gap to optimize the weights of the pre-trained StyleGAN generator to output images in domain B instead of domain A. The regularizers prevent the optimization from taking on too many attributes of the single reference image. 
Our results show significant visual improvements over the state of the art as well as multiple applications that highlight improved control.", "one-sentence_summary": "We propose several regularizers to control the domain transfer for single shot domain adaptation in the context of generative adversarial networks.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "zhu|mind_the_gap_domain_gap_control_for_single_shot_domain_adaptation_for_generative_adversarial_networks", "pdf": "/pdf/2f6e593f100fa850ecde50e059aa6b2e73a3f6fe.pdf", "supplementary_material": "/attachment/7622d6fc37313f193ff1062891b66ec77c6cc639.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/mind-the-gap-domain-gap-control-for-single/code)", "_bibtex": "@inproceedings{\nzhu2022mind,\ntitle={Mind the Gap: Domain Gap Control for Single Shot Domain Adaptation for Generative Adversarial Networks},\nauthor={Peihao Zhu and Rameen Abdal and John Femiani and Peter Wonka},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=vqGi8Kp0wM}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 15}} +{"id": "EnwCZixjSh", "original": "xju8EKevlaE", "number": 4244, "cdate": 1632875740242, "mdate": null, "ddate": null, "tcdate": 1632875740242, "tmdate": 1750551549563, "tddate": null, "forum": "EnwCZixjSh", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "On Evaluation Metrics for Graph Generative Models", "authorids": ["~Rylee_Thompson1", "~Boris_Knyazev1", "~Elahe_Ghalebi1", "~Jungtaek_Kim1", "~Graham_W._Taylor1"], "authors": ["Rylee Thompson", "Boris Knyazev", "Elahe Ghalebi", "Jungtaek Kim", "Graham W. Taylor"], "keywords": [], "abstract": "In image generation, generative models can be evaluated naturally by visually inspecting model outputs. However, this is not always the case for graph generative models (GGMs), making their evaluation challenging. Currently, the standard process for evaluating GGMs suffers from three critical limitations: i) it does not produce a single score which makes model selection challenging, ii) in many cases it fails to consider underlying edge and node features, and iii) it is prohibitively slow to perform. In this work, we mitigate these issues by searching for \\emph{scalar, domain-agnostic, and scalable metrics} for evaluating and ranking GGMs. To this end, we study existing GGM metrics and neural-network-based metrics emerging from generative models of images that use embeddings extracted from a task-specific network. Motivated by the power of Graph Neural Networks (GNNs) to extract meaningful graph representations \\emph{without any training}, we introduce several metrics based on the features extracted by an untrained random GNN. We design experiments to thoroughly test and objectively score metrics on their ability to measure the diversity and fidelity of generated graphs, as well as their sample and computational efficiency. Depending on the quantity of samples, we recommend one of two metrics from our collection of random-GNN-based metrics. 
We show these two metrics to be more expressive than pre-existing and alternative random-GNN-based metrics using our objective scoring. While we focus on applying these metrics to GGM evaluation, in practice this enables the ability to easily compute the dissimilarity between any two sets of graphs \\emph{regardless of domain}. Our code is released at: https://github.com/uoguelph-mlrg/GGM-metrics.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "thompson|on_evaluation_metrics_for_graph_generative_models", "pdf": "/pdf/fcb94055fd54a7db263aab7d0f85b591c34e713e.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/on-evaluation-metrics-for-graph-generative/code)", "_bibtex": "@inproceedings{\nthompson2022on,\ntitle={On Evaluation Metrics for Graph Generative Models},\nauthor={Rylee Thompson and Boris Knyazev and Elahe Ghalebi and Jungtaek Kim and Graham W. Taylor},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=EnwCZixjSh}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 26}} +{"id": "HfUyCRBeQc", "original": "GrzJc8zN8nO8", "number": 4240, "cdate": 1632875739972, "mdate": null, "ddate": null, "tcdate": 1632875739972, "tmdate": 1676330461239, "tddate": null, "forum": "HfUyCRBeQc", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Selective Ensembles for Consistent Predictions", "authorids": ["~Emily_Black1", "~Klas_Leino1", "~Matt_Fredrikson1"], "authors": ["Emily Black", "Klas Leino", "Matt Fredrikson"], "keywords": ["consistency", "prediction consistency", "model duplicity", "inconsistent predictions", "deep models", "deep networks", "explanations", "saliency maps", "gradient-based explanations", "fairness", "interpretability"], "abstract": "Recent work has shown that models trained to the same objective, and which achieve similar measures of accuracy on consistent test data, may nonetheless behave very differently on individual predictions. This inconsistency is undesirable in high-stakes contexts, such as medical diagnosis and finance. We show that this duplicitous behavior extends beyond predictions to feature attributions, which may likewise have negative implications for the intelligibility of a model, and one's ability to find recourse for subjects. We then introduce selective ensembles to mitigate such inconsistencies by applying hypothesis testing to the predictions of a set of models trained using randomly-selected starting conditions; importantly, selective ensembles can abstain in cases where a consistent outcome cannot be achieved up to a specified confidence level. We prove that prediction disagreement between selective ensembles is bounded, and empirically demonstrate that selective ensembles achieve consistent predictions and feature attributions while maintaining low abstention rates. On several benchmark datasets, selective ensembles reach zero inconsistently predicted points, with abstention rates as low as 1.5%.", "one-sentence_summary": "Deep models give inconsistent predictions and explanations over small changes (e.g. random initialization). 
We can mitigate this by using selective ensemble models, which abstain from prediction if their constituent models do not agree sufficiently.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "black|selective_ensembles_for_consistent_predictions", "pdf": "/pdf/aef96c65d43466af59147df0d990f0b94efbef7a.pdf", "supplementary_material": "/attachment/fa0e74655ac3aaa30612686210e7874379e58615.zip", "_bibtex": "@inproceedings{\nblack2022selective,\ntitle={Selective Ensembles for Consistent Predictions},\nauthor={Emily Black and Klas Leino and Matt Fredrikson},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=HfUyCRBeQc}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "WLEx3Jo4QaB", "original": "v5Q9KzUUB8l", "number": 4239, "cdate": 1632875739903, "mdate": null, "ddate": null, "tcdate": 1632875739903, "tmdate": 1750551549636, "tddate": null, "forum": "WLEx3Jo4QaB", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Graph Condensation for Graph Neural Networks", "authorids": ["~Wei_Jin4", "~Lingxiao_Zhao1", "~Shichang_Zhang2", "~Yozen_Liu1", "~Jiliang_Tang1", "~Neil_Shah2"], "authors": ["Wei Jin", "Lingxiao Zhao", "Shichang Zhang", "Yozen Liu", "Jiliang Tang", "Neil Shah"], "keywords": ["data-efficient learning", "graph generation", "graph neural networks"], "abstract": "Given the prevalence of large-scale graphs in real-world applications, the storage and time for training neural models have raised increasing concerns. To alleviate the concerns, we propose and study the problem of graph condensation for graph neural networks (GNNs). Specifically, we aim to condense the large, original graph into a small, synthetic and highly-informative graph, such that GNNs trained on the small graph and large graph have comparable performance. We approach the condensation problem by imitating the GNN training trajectory on the original graph through the optimization of a gradient matching loss and design a strategy to condense node futures and structural information simultaneously. Extensive experiments have demonstrated the effectiveness of the proposed framework in condensing different graph datasets into informative smaller graphs. In particular, we are able to approximate the original test accuracy by 95.3\\% on Reddit, 99.8\\% on Flickr and 99.0\\% on Citeseer, while reducing their graph size by more than 99.9\\%, and the condensed graphs can be used to train various GNN architectures. 
", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "jin|graph_condensation_for_graph_neural_networks", "pdf": "/pdf/fb904d1d840eb264e6ab2e160ff7322153a1fbb0.pdf", "one-sentence_summary": "We study the problem of graph condensation which targets at condensing a large-real graph into a small-synthetic one while maintaining the performances of GNNs.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/graph-condensation-for-graph-neural-networks/code)", "_bibtex": "@inproceedings{\njin2022graph,\ntitle={Graph Condensation for Graph Neural Networks},\nauthor={Wei Jin and Lingxiao Zhao and Shichang Zhang and Yozen Liu and Jiliang Tang and Neil Shah},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=WLEx3Jo4QaB}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 15}} +{"id": "bVvMOtLMiw", "original": "V4-eLvV2-iB", "number": 4238, "cdate": 1632875739832, "mdate": null, "ddate": null, "tcdate": 1632875739832, "tmdate": 1676330461357, "tddate": null, "forum": "bVvMOtLMiw", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "DIVA: Dataset Derivative of a Learning Task", "authorids": ["~Yonatan_Dukler1", "~Alessandro_Achille1", "~Giovanni_Paolini1", "~Avinash_Ravichandran1", "~Marzia_Polito1", "~Stefano_Soatto3"], "authors": ["Yonatan Dukler", "Alessandro Achille", "Giovanni Paolini", "Avinash Ravichandran", "Marzia Polito", "Stefano Soatto"], "keywords": ["Leave one out cross validation", "AutoML", "dataset optimization"], "abstract": "We present a method to compute the derivative of a learning task with respect to a dataset. A learning task is a function from a training set to the validation error, which can be represented by a trained deep neural network (DNN). The ``dataset derivative'' is a linear operator, computed around the trained model, that informs how perturbations of the weight of each training sample affect the validation error, usually computed on a separate validation dataset. Our method, DIVA (Differentiable Validation) hinges on a closed-form differentiable expression of the leave-one-out cross-validation error around a pre-trained DNN. Such expression constitutes the dataset derivative. DIVA could be used for dataset auto-curation, for example removing samples with faulty annotations, augmenting a dataset with additional relevant samples, or rebalancing. More generally, DIVA can be used to optimize the dataset, along with the parameters of the model, as part of the training process without the need for a separate validation dataset, unlike bi-level optimization methods customary in AutoML. 
To illustrate the flexibility of DIVA, we report experiments on sample auto-curation tasks such as outlier rejection, dataset extension, and automatic aggregation of multi-modal data.", "one-sentence_summary": "Presents a method to optimize a dataset based on a notion of a dataset derivative that is computed in closed form using linearization", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "dukler|diva_dataset_derivative_of_a_learning_task", "pdf": "/pdf/c20ae574c689fe5fbecb96f791b3e678973e0053.pdf", "supplementary_material": "/attachment/d872c8d7c427b934714a7377c281e34e34159490.zip", "_bibtex": "@inproceedings{\ndukler2022diva,\ntitle={{DIVA}: Dataset Derivative of a Learning Task},\nauthor={Yonatan Dukler and Alessandro Achille and Giovanni Paolini and Avinash Ravichandran and Marzia Polito and Stefano Soatto},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=bVvMOtLMiw}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "sA4qIu3zv6v", "original": "L62IAUUi6zN", "number": 4231, "cdate": 1632875739358, "mdate": null, "ddate": null, "tcdate": 1632875739358, "tmdate": 1676330461466, "tddate": null, "forum": "sA4qIu3zv6v", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Towards General Function Approximation in Zero-Sum Markov Games", "authorids": ["~Baihe_Huang1", "~Jason_D._Lee1", "~Zhaoran_Wang1", "~Zhuoran_Yang1"], "authors": ["Baihe Huang", "Jason D. Lee", "Zhaoran Wang", "Zhuoran Yang"], "keywords": [], "abstract": "This paper considers two-player zero-sum finite-horizon Markov games with simultaneous moves. The study focuses on the challenging settings where the value\nfunction or the model is parameterized by general function classes. Provably efficient\nalgorithms for both decoupled and coordinated settings are developed. In the decoupled setting where the agent controls a single player and plays against an arbitrary opponent, we propose a new model-free algorithm. The sample complexity is governed by the Minimax Eluder dimension—a new dimension of the function class in Markov games. As a special case, this method improves the state-of-the-art algorithm\nby a $\\sqrt{d}$ factor in the regret when the reward function and transition kernel are parameterized with d-dimensional linear features. In the coordinated setting where both\nplayers are controlled by the agent, we propose a model-based algorithm and a model-free algorithm. In the model-based algorithm, we prove that sample complexity can\nbe bounded by a generalization of Witness rank to Markov games. The model-free\nalgorithm enjoys a $\\sqrt{K}$-regret upper bound where $K$ is the number of episodes. 
Our\nalgorithms are based on new techniques of alternate optimism", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "huang|towards_general_function_approximation_in_zerosum_markov_games", "pdf": "/pdf/89164a5698b4ced1396254451108620fc52d5bc1.pdf", "supplementary_material": "/attachment/fbf79664452d7143663daadfb9e759e4a054117d.zip", "_bibtex": "@inproceedings{\nhuang2022towards,\ntitle={Towards General Function Approximation in Zero-Sum Markov Games},\nauthor={Baihe Huang and Jason D. Lee and Zhaoran Wang and Zhuoran Yang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=sA4qIu3zv6v}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 13}} +{"id": "6PvWo1kEvlT", "original": "SS6i4pzUgHk", "number": 4229, "cdate": 1632875739224, "mdate": null, "ddate": null, "tcdate": 1632875739224, "tmdate": 1750551549983, "tddate": null, "forum": "6PvWo1kEvlT", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Exposing the Implicit Energy Networks behind Masked Language Models via Metropolis--Hastings", "authorids": ["~Kartik_Goyal1", "~Chris_Dyer1", "~Taylor_Berg-Kirkpatrick1"], "authors": ["Kartik Goyal", "Chris Dyer", "Taylor Berg-Kirkpatrick"], "keywords": ["Masked Language Models", "Energy-based models", "Metropolis Hastings Monte Carlo", "Bidirectional Sequence models"], "abstract": "While recent work has shown that scores from models trained by the ubiquitous masked language modeling (MLM) objective effectively discriminate probable from improbable sequences, it is still an open question if these MLMs specify a principled probability distribution over the space of possible sequences. In this paper, we interpret MLMs as energy-based sequence models and propose two energy parametrizations derivable from the trained MLMs. In order to draw samples correctly from these models, we develop a tractable sampling scheme based on the Metropolis--Hastings Monte Carlo algorithm. In our approach, samples are proposed from the same masked conditionals used for training the masked language models, and they are accepted or rejected based on their energy values according to the target distribution. We validate the effectiveness of the proposed parametrizations by exploring the quality of samples drawn from these energy-based models for both open-ended unconditional generation and a conditional generation task of machine translation. 
We theoretically and empirically justify our sampling algorithm by showing that the masked conditionals on their own do not yield a Markov chain whose stationary distribution is that of our target distribution, and our approach generates higher quality samples than other recently proposed undirected generation approaches (Wang et al., 2019, Ghazvininejad et al., 2019).", "one-sentence_summary": "We interpret masked language models for sequences as energy based models and propose a tractable scheme inspired by Metropolis--Hasting Monte Carlo to draw samples from these models.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "goyal|exposing_the_implicit_energy_networks_behind_masked_language_models_via_metropolishastings", "pdf": "/pdf/dfdc7212f0c035baaec71e0d9d64317aec15492b.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/exposing-the-implicit-energy-networks-behind/code)", "_bibtex": "@inproceedings{\ngoyal2022exposing,\ntitle={Exposing the Implicit Energy Networks behind Masked Language Models via Metropolis--Hastings},\nauthor={Kartik Goyal and Chris Dyer and Taylor Berg-Kirkpatrick},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=6PvWo1kEvlT}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 19}} +{"id": "EZNOb_uNpJk", "original": "S1N7EmvNdD", "number": 4226, "cdate": 1632875739026, "mdate": null, "ddate": null, "tcdate": 1632875739026, "tmdate": 1750551550095, "tddate": null, "forum": "EZNOb_uNpJk", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "ClimateGAN: Raising Climate Change Awareness by Generating Images of Floods", "authorids": ["~Victor_Schmidt2", "~Alexandra_Luccioni1", "~Mélisande_Teng1", "tianyu.zhang@mila.quebec", "alexia.reynaud@polymtl.ca", "~Sunand_Raghupathi1", "cosne.gautier@gmail.com", "a.juraver@gmail.com", "vardanyan.vahe@gmail.com", "~Alex_Hernández-García1", "~Yoshua_Bengio1"], "authors": ["Victor Schmidt", "Alexandra Luccioni", "Mélisande Teng", "Tianyu Zhang", "Alexia Reynaud", "Sunand Raghupathi", "Gautier Cosne", "Adrien Juraver", "Vahe Vardanyan", "Alex Hernández-García", "Yoshua Bengio"], "keywords": ["GAN", "Climate Change", "Domain Adaptation", "Representation Learning", "Computer Vision", "Application"], "abstract": "Climate change is a major threat to humanity and the actions required to prevent its catastrophic consequences include changes in both policy-making and individual behaviour. However, taking action requires understanding its seemingly abstract and distant consequences. Projecting the potential impacts of extreme climate events such as flooding in familiar places can help make the impacts of climate change more concrete and encourage action. As part of a larger initiative to build a website (https://thisclimatedoesnotexist.com) that projects extreme climate events onto user-chosen photos, we present our solution to simulate photo-realistic floods on authentic images. 
To address this complex task in the absence of suitable data, we propose ClimateGAN, a model that leverages both simulated and real data through unsupervised domain adaptation and conditional image generation. In this paper, we describe the details of our framework, thoroughly evaluate the main components of our architecture and demonstrate that our model is capable of robustly generating photo-realistic flooding on street images.", "one-sentence_summary": "This paper presents a model to robustly produce photo-realistic images of floods for raising climate change awareness, leveraging unsupervised domain adaptation and conditional image generation.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "schmidt|climategan_raising_climate_change_awareness_by_generating_images_of_floods", "pdf": "/pdf/ca121d72177c0fb77244bde0b2958681a89d4b98.pdf", "supplementary_material": "/attachment/e35ffdfb3f901392fc63cf210a0634fe93414b4b.zip", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 4 code implementations](https://www.catalyzex.com/paper/climategan-raising-climate-change-awareness/code)", "_bibtex": "@inproceedings{\nschmidt2022climategan,\ntitle={Climate{GAN}: Raising Climate Change Awareness by Generating Images of Floods},\nauthor={Victor Schmidt and Alexandra Luccioni and M{\\'e}lisande Teng and Tianyu Zhang and Alexia Reynaud and Sunand Raghupathi and Gautier Cosne and Adrien Juraver and Vahe Vardanyan and Alex Hern{\\'a}ndez-Garc{\\'\\i}a and Yoshua Bengio},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=EZNOb_uNpJk}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 21}} +{"id": "nhN-fqxmNGx", "original": "Mqqa761ll6Df", "number": 4224, "cdate": 1632875738893, "mdate": null, "ddate": null, "tcdate": 1632875738893, "tmdate": 1676330462149, "tddate": null, "forum": "nhN-fqxmNGx", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "A Comparison of Hamming Errors of Representative Variable Selection Methods", "authorids": ["~Tracy_Ke1", "~Longlin_Wang1"], "authors": ["Tracy Ke", "Longlin Wang"], "keywords": ["Lasso", "Hamming error", "phase diagram", "rare and weak signals", "elastic net", "SCAD", "thresholded Lasso", "forward selection", "forward backward selection"], "abstract": "Lasso is a celebrated method for variable selection in linear models, but it faces challenges when the covariates are moderately or strongly correlated. This motivates alternative approaches such as using a non-convex penalty, adding a ridge regularization, or conducting a post-Lasso thresholding. In this paper, we compare Lasso with 5 other methods: Elastic net, SCAD, forward selection, thresholded Lasso, and forward backward selection. We measure their performances theoretically by the expected Hamming error, assuming that the regression coefficients are ${\\it iid}$ drawn from a two-point mixture and that the Gram matrix is block-wise diagonal. 
By deriving the rates of convergence of Hamming errors and the phase diagrams, we obtain useful conclusions about the pros and cons of different methods.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ke|a_comparison_of_hamming_errors_of_representative_variable_selection_methods", "pdf": "/pdf/ae8e44624ed225194ef2c6ef294ae6d5067515b8.pdf", "one-sentence_summary": "A theoretical comparison of the Hamming errors for 6 different variable selection methods", "supplementary_material": "/attachment/4f16334f29e6421dce68a951e210ea5abf0531db.zip", "_bibtex": "@inproceedings{\nke2022a,\ntitle={A Comparison of Hamming Errors of Representative Variable Selection Methods},\nauthor={Tracy Ke and Longlin Wang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=nhN-fqxmNGx}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 5}} +{"id": "WE4qe9xlnQw", "original": "gCeTN7AdZhM", "number": 4218, "cdate": 1632875738488, "mdate": null, "ddate": null, "tcdate": 1632875738488, "tmdate": 1676330462613, "tddate": null, "forum": "WE4qe9xlnQw", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "A Program to Build E(N)-Equivariant Steerable CNNs ", "authorids": ["~Gabriele_Cesa1", "~Leon_Lang1", "~Maurice_Weiler1"], "authors": ["Gabriele Cesa", "Leon Lang", "Maurice Weiler"], "keywords": ["equivariance", "3D", "geometric deep learning", "isometries", "steerable CNN"], "abstract": "Equivariance is becoming an increasingly popular design choice to build data efficient neural networks by exploiting prior knowledge about the symmetries of the problem at hand. Euclidean steerable CNNs are one of the most common classes of equivariant networks. While the constraints these architectures need to satisfy are understood, existing approaches are tailored to specific (classes of) groups. No generally applicable method that is practical for implementation has been described so far. In this work, we generalize the Wigner-Eckart theorem proposed in Lang & Weiler (2020), which characterizes general $G$-steerable kernel spaces for compact groups $G$ over their homogeneous spaces, to arbitrary $G$-spaces. This enables us to directly parameterize filters in terms of a band-limited basis on the whole space rather than on $G$'s orbits, but also to easily implement steerable CNNs equivariant to a large number of groups. To demonstrate its generality, we instantiate our method on a variety of isometry groups acting on the Euclidean space $\\mathbb{R}^3$. Our framework allows us to build $E(3)$ and $SE(3)$-steerable CNNs like previous works, but also CNNs with arbitrary $G\\leq O(3)$-steerable kernels. For example, we build 3D CNNs equivariant to the symmetries of platonic solids or choose $G=SO(2)$ when working with 3D data having only azimuthal symmetries. 
We compare these models on 3D shapes and molecular datasets, observing improved performance by matching the model's symmetries to the ones of the data.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "cesa|a_program_to_build_enequivariant_steerable_cnns", "pdf": "/pdf/6d634b6f1eabc70593f897e223c78025e3029b52.pdf", "one-sentence_summary": "We derive a general method to build G-steerable kernel spaces for equivariant steerable CNNs", "supplementary_material": "/attachment/8e59589617d819e70c68a8002c689047b6750a52.zip", "data": "", "_bibtex": "@inproceedings{\ncesa2022a,\ntitle={A Program to Build E(N)-Equivariant Steerable {CNN}s },\nauthor={Gabriele Cesa and Leon Lang and Maurice Weiler},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=WE4qe9xlnQw}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "UdxJ2fJx7N0", "original": "qqoU5AdqMzfa", "number": 4217, "cdate": 1632875738421, "mdate": null, "ddate": null, "tcdate": 1632875738421, "tmdate": 1750551550369, "tddate": null, "forum": "UdxJ2fJx7N0", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Minimax Optimization with Smooth Algorithmic Adversaries", "authorids": ["~Tanner_Fiez1", "~Chi_Jin1", "~Praneeth_Netrapalli1", "~Lillian_J_Ratliff1"], "authors": ["Tanner Fiez", "Chi Jin", "Praneeth Netrapalli", "Lillian J Ratliff"], "keywords": ["Minimax optimization", "two player zero sum games", "generative adversarial networks", "adversarial training"], "abstract": "This paper considers minimax optimization $\\min_x \\max_y f(x, y)$ in the challenging setting where $f$ can be both nonconvex in $x$ and nonconcave in $y$. Though such optimization problems arise in many machine learning paradigms including training generative adversarial networks (GANs) and adversarially robust models, from a theoretical point of view, two fundamental issues remain: (i) the absence of simple and efficiently computable optimality notions, and (ii) cyclic or diverging behavior of existing algorithms. This paper proposes a new theoretical framework for nonconvex-nonconcave minimax optimization that addresses both of the above issues. The starting point of this paper is the observation that, under a computational budget, the max-player can not fully maximize $f(x,\\cdot)$ since nonconcave maximization is NP-hard in general. So, we propose a new framework, and a corresponding algorithm, for the min-player to play against \\emph{smooth algorithms} deployed by the adversary (i.e., the max-player) instead of against full maximization. Our algorithm is guaranteed to make monotonic progress (thus having no limit cycles or diverging behavior), and to find an appropriate ``stationary point'' in a polynomial number of iterations. Our framework covers practically relevant settings where the smooth algorithms deployed by the adversary are multi-step stochastic gradient ascent, and its accelerated version. 
We further present experimental results that confirm our theoretical findings and demonstrate the effectiveness of the proposed approach in practice on simple, conceptual settings.", "one-sentence_summary": "We propose a tractable formulation of minimax optimization by modeling the adversary's algorithm, and present new algorithms which are guaranteed to converge and find appropriate stationary points.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "fiez|minimax_optimization_with_smooth_algorithmic_adversaries", "pdf": "/pdf/6f978c34600cf6fcf440c6e1bf8d1f93e0afce3d.pdf", "supplementary_material": "/attachment/4e2ba35603ae73bda8e10822707db81c5277ec74.zip", "code": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/minimax-optimization-with-smooth-algorithmic/code)", "_bibtex": "@inproceedings{\nfiez2022minimax,\ntitle={Minimax Optimization with Smooth Algorithmic Adversaries},\nauthor={Tanner Fiez and Chi Jin and Praneeth Netrapalli and Lillian J Ratliff},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=UdxJ2fJx7N0}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 10}} +{"id": "CI-xXX9dg9l", "original": "8SBdOP5ommX", "number": 4202, "cdate": 1632875737422, "mdate": null, "ddate": null, "tcdate": 1632875737422, "tmdate": 1676330463125, "tddate": null, "forum": "CI-xXX9dg9l", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "On Distributed Adaptive Optimization with Gradient Compression", "authorids": ["~Xiaoyun_Li1", "~Belhal_Karimi1", "~Ping_Li3"], "authors": ["Xiaoyun Li", "Belhal Karimi", "Ping Li"], "keywords": [], "abstract": "We study COMP-AMS, a distributed optimization framework based on gradient averaging and adaptive AMSGrad algorithm. Gradient compression with error feedback is applied to reduce the communication cost in the gradient transmission process. Our convergence analysis of COMP-AMS shows that such compressed gradient averaging strategy yields same convergence rate as standard AMSGrad, and also exhibits the linear speedup effect w.r.t. the number of local workers. Compared with recently proposed protocols on distributed adaptive methods, COMP-AMS is simple and convenient. Numerical experiments are conducted to justify the theoretical findings, and demonstrate that the proposed method can achieve same test accuracy as the full-gradient AMSGrad with substantial communication savings. 
With its simplicity and efficiency, COMP-AMS can serve as a useful distributed training framework for adaptive methods.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "li|on_distributed_adaptive_optimization_with_gradient_compression", "pdf": "/pdf/84313c8e0bf7b65d71addc3b16aba48f161f4092.pdf", "data": "", "_bibtex": "@inproceedings{\nli2022on,\ntitle={On Distributed Adaptive Optimization with Gradient Compression},\nauthor={Xiaoyun Li and Belhal Karimi and Ping Li},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=CI-xXX9dg9l}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 7}} +{"id": "o_HsiMPYh_x", "original": "OjUMTP0Awx", "number": 4190, "cdate": 1632875736617, "mdate": null, "ddate": null, "tcdate": 1632875736617, "tmdate": 1750551550836, "tddate": null, "forum": "o_HsiMPYh_x", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Leveraging unlabeled data to predict out-of-distribution performance", "authorids": ["~Saurabh_Garg3", "~Sivaraman_Balakrishnan1", "~Zachary_Chase_Lipton1", "~Behnam_Neyshabur1", "~Hanie_Sedghi1"], "authors": ["Saurabh Garg", "Sivaraman Balakrishnan", "Zachary Chase Lipton", "Behnam Neyshabur", "Hanie Sedghi"], "keywords": ["Distribution Shift", "OOD error prediction", "Deep Learning"], "abstract": "Real-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions\nthat may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a \\emph{threshold} on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (\\textsc{Wilds}-FMoW, ImageNet, \\breeds, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2\\text{--}4\\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. 
Finally, analyzing our method on some toy distributions, we provide insights concerning when it works.\n\n", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "garg|leveraging_unlabeled_data_to_predict_outofdistribution_performance", "pdf": "/pdf/f94008d1c0cfc4177d8617db211b62b1f85906ea.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/leveraging-unlabeled-data-to-predict-out-of/code)", "_bibtex": "@inproceedings{\ngarg2022leveraging,\ntitle={Leveraging unlabeled data to predict out-of-distribution performance},\nauthor={Saurabh Garg and Sivaraman Balakrishnan and Zachary Chase Lipton and Behnam Neyshabur and Hanie Sedghi},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=o_HsiMPYh_x}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 21}} +{"id": "7udZAsEzd60", "original": "07Whlyou2JC", "number": 4186, "cdate": 1632875736348, "mdate": null, "ddate": null, "tcdate": 1632875736348, "tmdate": 1750551550910, "tddate": null, "forum": "7udZAsEzd60", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "VC dimension of partially quantized neural networks in the overparametrized regime", "authorids": ["~Yutong_Wang1", "~Clayton_Scott1"], "authors": ["Yutong Wang", "Clayton Scott"], "keywords": ["VC dimension", "quantized neural networks", "classification", "minimax theory", "overparametrization"], "abstract": "Vapnik-Chervonenkis (VC) theory has so far been unable to explain the small generalization error of overparametrized neural networks. Indeed, existing applications of VC theory to large networks obtain upper bounds on VC dimension that are proportional to the number of weights, and for a large class of networks, these upper bound are known to be tight. In this work, we focus on a class of partially quantized networks that we refer to as hyperplane arrangement neural networks (HANNs). Using a sample compression analysis, we show that HANNs can have VC dimension significantly smaller than the number of weights, while being highly expressive. In particular, empirical risk minimization over HANNs in the overparametrized regime achieves the minimax rate for classification with Lipschitz posterior class probability. We further demonstrate the expressivity of HANNs empirically. 
On a panel of 121 UCI datasets, overparametrized HANNs are able to match the performance of state-of-the-art full-precision models.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wang|vc_dimension_of_partially_quantized_neural_networks_in_the_overparametrized_regime", "pdf": "/pdf/9760187606b3496a5f4a0fe752a22416bb4a2e21.pdf", "one-sentence_summary": "We apply VC theory to analyze the performance of a neural network in the overparametrized regime and obtain a minimax-optimality result.", "supplementary_material": "/attachment/0a8e41070fc65e398f4f8fb821807fb71ff3378e.zip", "code": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/vc-dimension-of-partially-quantized-neural/code)", "_bibtex": "@inproceedings{\nwang2022vc,\ntitle={{VC} dimension of partially quantized neural networks in the overparametrized regime},\nauthor={Yutong Wang and Clayton Scott},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=7udZAsEzd60}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "Rf58LPCwJj0", "original": "-njhPrIYcw3c", "number": 4179, "cdate": 1632875735874, "mdate": null, "ddate": null, "tcdate": 1632875735874, "tmdate": 1750551551029, "tddate": null, "forum": "Rf58LPCwJj0", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Optimal Representations for Covariate Shift", "authorids": ["~Yangjun_Ruan1", "~Yann_Dubois1", "~Chris_J._Maddison1"], "authors": ["Yangjun Ruan", "Yann Dubois", "Chris J. Maddison"], "keywords": ["distribution shift", "domain generalization", "representation learning", "self-supervised learning", "invariance", "robustness"], "abstract": "Machine learning systems often experience a distribution shift between training and testing. In this paper, we introduce a simple variational objective whose optima are exactly the set of all representations on which risk minimizers are guaranteed to be robust to any distribution shift that preserves the Bayes predictor, e.g., covariate shifts. Our objective has two components. First, a representation must remain discriminative for the task, i.e., some predictor must be able to simultaneously minimize the source and target risk. Second, the representation's marginal support needs to be the same across source and target. We make this practical by designing self-supervised objectives that only use unlabelled data and augmentations to train robust representations. 
\nOur objectives give insights into the robustness of CLIP, and further improve CLIP's representations to achieve SOTA results on DomainBed.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ruan|optimal_representations_for_covariate_shift", "pdf": "/pdf/ddc6369b11aed2bc1a72bc2f493bb2ebd0f65be7.pdf", "one-sentence_summary": "We give a simple variational objective whose optima are exactly the set of representations that are robust under covariate shift", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/optimal-representations-for-covariate-shift/code)", "_bibtex": "@inproceedings{\nruan2022optimal,\ntitle={Optimal Representations for Covariate Shift},\nauthor={Yangjun Ruan and Yann Dubois and Chris J. Maddison},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Rf58LPCwJj0}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 18}} +{"id": "ei3SY1_zYsE", "original": "7i9FP3LesvH", "number": 4178, "cdate": 1632875735807, "mdate": null, "ddate": null, "tcdate": 1632875735807, "tmdate": 1750551551202, "tddate": null, "forum": "ei3SY1_zYsE", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Fortuitous Forgetting in Connectionist Networks", "authorids": ["~Hattie_Zhou1", "~Ankit_Vani1", "~Hugo_Larochelle1", "~Aaron_Courville3"], "authors": ["Hattie Zhou", "Ankit Vani", "Hugo Larochelle", "Aaron Courville"], "keywords": ["Neural Networks", "Generalization", "Iterative Training", "Compositionality", "Iterated Learning"], "abstract": "Forgetting is often seen as an unwanted characteristic in both human and machine learning. However, we propose that forgetting can in fact be favorable to learning. We introduce forget-and-relearn as a powerful paradigm for shaping the learning trajectories of artificial neural networks. In this process, the forgetting step selectively removes undesirable information from the model, and the relearning step reinforces features that are consistently useful under different conditions. The forget-and-relearn framework unifies many existing iterative training algorithms in the image classification and language emergence literature, and allows us to understand the success of these algorithms in terms of the disproportionate forgetting of undesirable information. We leverage this understanding to improve upon existing algorithms by designing more targeted forgetting operations. 
Insights from our analysis provide a coherent view on the dynamics of iterative training in neural networks and offer a clear path towards performance improvements.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "zhou|fortuitous_forgetting_in_connectionist_networks", "pdf": "/pdf/ca4d5fd0fac40867b797ca356f4056c7cb11fc6a.pdf", "one-sentence_summary": "We introduce \"forget-and-relearn\" as a training paradigm where forgetting removes undesirable information and relearning bolsters useful features towards better generalization and compositionality.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/fortuitous-forgetting-in-connectionist/code)", "_bibtex": "@inproceedings{\nzhou2022fortuitous,\ntitle={Fortuitous Forgetting in Connectionist Networks},\nauthor={Hattie Zhou and Ankit Vani and Hugo Larochelle and Aaron Courville},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=ei3SY1_zYsE}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 19}} +{"id": "So6YAqnqgMj", "original": "8dGddB7LRc1X", "number": 4175, "cdate": 1632875735606, "mdate": null, "ddate": null, "tcdate": 1632875735606, "tmdate": 1676330464400, "tddate": null, "forum": "So6YAqnqgMj", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "EigenGame Unloaded: When playing games is better than optimizing", "authorids": ["~Ian_Gemp1", "~Brian_McWilliams2", "~Claire_Vernade1", "~Thore_Graepel1"], "authors": ["Ian Gemp", "Brian McWilliams", "Claire Vernade", "Thore Graepel"], "keywords": ["pca", "principal components analysis", "nash", "games", "eigendecomposition", "svd", "singular value decomposition"], "abstract": "We build on the recently proposed EigenGame that views eigendecomposition as a competitive game. EigenGame's updates are biased if computed using minibatches of data, which hinders convergence and more sophisticated parallelism in the stochastic setting. In this work, we propose an unbiased stochastic update that is asymptotically equivalent to EigenGame, enjoys greater parallelism allowing computation on datasets of larger sample sizes, and outperforms EigenGame in experiments. We present applications to finding the principal components of massive datasets and performing spectral clustering of graphs. 
We analyze and discuss our proposed update in the context of EigenGame and the shift in perspective from optimization to games.", "one-sentence_summary": "We improve the EigenGame algorithm by removing update bias, enabling further parallelism and better performance.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "gemp|eigengame_unloaded_when_playing_games_is_better_than_optimizing", "pdf": "/pdf/cedcb096f43d8f1b1e43c8969cf5b1dd7e83d5ae.pdf", "supplementary_material": "/attachment/1bb9f80ed132a3de78ef901e29fcfe86fe5533aa.zip", "_bibtex": "@inproceedings{\ngemp2022eigengame,\ntitle={EigenGame Unloaded: When playing games is better than optimizing},\nauthor={Ian Gemp and Brian McWilliams and Claire Vernade and Thore Graepel},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=So6YAqnqgMj}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "Oh1r2wApbPv", "original": "2-1zPOoichsq", "number": 4167, "cdate": 1632875735066, "mdate": null, "ddate": null, "tcdate": 1632875735066, "tmdate": 1750551551483, "tddate": null, "forum": "Oh1r2wApbPv", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Contextualized Scene Imagination for Generative Commonsense Reasoning", "authorids": ["~PeiFeng_Wang1", "jzamoraa@ucsd.edu", "liujunfe@usc.edu", "~Filip_Ilievski1", "~Muhao_Chen1", "~Xiang_Ren1"], "authors": ["PeiFeng Wang", "Jonathan Zamora", "Junfeng Liu", "Filip Ilievski", "Muhao Chen", "Xiang Ren"], "keywords": ["Commonsense reasoning", "constrained text generation", "knowledge representation"], "abstract": "Humans use natural language to compose common concepts from their environment into plausible, day-to-day scene descriptions. However, such generative commonsense reasoning (GCSR) skills are lacking in state-of-the-art text generation methods. Descriptive sentences about arbitrary concepts generated by neural text generation models (e.g., pre-trained text-to-text Transformers) are often grammatically fluent but may not correspond to human common sense, largely due to their lack of mechanisms to capture concept relations, to identify implicit concepts, and to perform generalizable reasoning about unseen concept compositions. In this paper, we propose an Imagine-and-Verbalize (I\\&V) method, which learns to imagine a relational scene knowledge graph (SKG) with relations between the input concepts, and leverage the SKG as a constraint when generating a plausible scene description. We collect and harmonize a set of knowledge resources from different domains and modalities, providing a rich auxiliary supervision signal for I\\&V. 
The experiments demonstrate the effectiveness of I\\&V in improving language models on both concept-to-sentence and concept-to-story generation tasks, while enabling the model to learn well from fewer task examples and generate SKGs that make common sense to human annotators.", "one-sentence_summary": "This work aims at tackling generative commonsense reasoning by allowing machines to imagine a reasonable scene before generating text.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wang|contextualized_scene_imagination_for_generative_commonsense_reasoning", "pdf": "/pdf/a66e1b12b2211131a44463611c8c272c21decbfb.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/contextualized-scene-imagination-for/code)", "_bibtex": "@inproceedings{\nwang2022contextualized,\ntitle={Contextualized Scene Imagination for Generative Commonsense Reasoning},\nauthor={PeiFeng Wang and Jonathan Zamora and Junfeng Liu and Filip Ilievski and Muhao Chen and Xiang Ren},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Oh1r2wApbPv}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 25}} +{"id": "Wm3EA5OlHsG", "original": "FOt3bNmDFnv", "number": 4165, "cdate": 1632875734935, "mdate": null, "ddate": null, "tcdate": 1632875734935, "tmdate": 1690470727117, "tddate": null, "forum": "Wm3EA5OlHsG", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Scene Transformer: A unified architecture for predicting future trajectories of multiple agents", "authorids": ["~Jiquan_Ngiam1", "~Vijay_Vasudevan1", "~Benjamin_Caine1", "~Zhengdong_Zhang3", "~Hao-Tien_Lewis_Chiang1", "~Jeffrey_Ling1", "~Rebecca_Roelofs1", "~Alex_Bewley1", "~Chenxi_Liu1", "~Ashish_Venugopal1", "~David_J_Weiss1", "~Benjamin_Sapp3", "~Zhifeng_Chen1", "~Jonathon_Shlens1"], "authors": ["Jiquan Ngiam", "Vijay Vasudevan", "Benjamin Caine", "Zhengdong Zhang", "Hao-Tien Lewis Chiang", "Jeffrey Ling", "Rebecca Roelofs", "Alex Bewley", "Chenxi Liu", "Ashish Venugopal", "David J Weiss", "Benjamin Sapp", "Zhifeng Chen", "Jonathon Shlens"], "keywords": ["trajectory prediction", "motion forecasting", "multi-task learning", "attention", "autonomous vehicles"], "abstract": "Predicting the motion of multiple agents is necessary for planning in dynamic environments. This task is challenging for autonomous driving since agents (e.g., vehicles and pedestrians) and their associated behaviors may be diverse and influence one another. Most prior work have focused on predicting independent futures for each agent based on all past motion, and planning against these independent predictions. However, planning against independent predictions can make it challenging to represent the future interaction possibilities between different agents, leading to sub-optimal planning. In this work, we formulate a model for predicting the behavior of all agents jointly, producing consistent futures that account for interactions between agents. 
Inspired by recent language modeling approaches, we use a masking strategy as the query to our model, enabling one to invoke a single model to predict agent behavior in many ways, such as potentially conditioned on the goal or full future trajectory of the autonomous vehicle or the behavior of other agents in the environment. Our model architecture employs attention to combine features across road elements, agent interactions, and time steps. We evaluate our approach on autonomous driving datasets for both marginal and joint motion prediction, and achieve state of the art performance across two popular datasets. Through combining a scene-centric approach, agent permutation equivariant model, and a sequence masking strategy, we show that our model can unify a variety of motion prediction tasks from joint motion predictions to conditioned prediction.", "one-sentence_summary": "We introduce a scene-centric masked sequence based motion prediction model that unifies a variety of motion prediction tasks from joint motion predictions to conditioned prediction.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ngiam|scene_transformer_a_unified_architecture_for_predicting_future_trajectories_of_multiple_agents", "pdf": "/pdf/92f191f2cdcf1389ed2d3dce901833dc5fc6deaf.pdf", "data": "", "_bibtex": "@inproceedings{\nngiam2022scene,\ntitle={Scene Transformer: A unified architecture for predicting future trajectories of multiple agents},\nauthor={Jiquan Ngiam and Vijay Vasudevan and Benjamin Caine and Zhengdong Zhang and Hao-Tien Lewis Chiang and Jeffrey Ling and Rebecca Roelofs and Alex Bewley and Chenxi Liu and Ashish Venugopal and David J Weiss and Benjamin Sapp and Zhifeng Chen and Jonathon Shlens},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Wm3EA5OlHsG}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 15}} +{"id": "qY79G8jGsep", "original": "tXGC-pZS7m", "number": 4164, "cdate": 1632875734869, "mdate": null, "ddate": null, "tcdate": 1632875734869, "tmdate": 1750551551530, "tddate": null, "forum": "qY79G8jGsep", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "DISSECT: Disentangled Simultaneous Explanations via Concept Traversals", "authorids": ["~Asma_Ghandeharioun1", "~Been_Kim1", "~Chun-Liang_Li1", "~Brendan_Jou1", "~Brian_Eoff1", "~Rosalind_Picard1"], "authors": ["Asma Ghandeharioun", "Been Kim", "Chun-Liang Li", "Brendan Jou", "Brian Eoff", "Rosalind Picard"], "keywords": ["Explainability", "Interpretability", "Counterfactual generation", "Generative Adversarial Network", "Variational Autoencoder"], "abstract": "Explaining deep learning model inferences is a promising venue for scientific understanding, improving safety, uncovering hidden biases, evaluating fairness, and beyond, as argued by many scholars. One of the principal benefits of counterfactual explanations is allowing users to explore \"what-if\" scenarios through what does not and cannot exist in the data, a quality that many other forms of explanation such as heatmaps and influence functions are inherently incapable of doing. 
However, most previous work on generative explainability cannot disentangle important concepts effectively, produces unrealistic examples, or fails to retain relevant information. We propose a novel approach, DISSECT, that jointly trains a generator, a discriminator, and a concept disentangler to overcome such challenges using little supervision. DISSECT generates Concept Traversals (CTs), defined as a sequence of generated examples with increasing degrees of concepts that influence a classifier's decision. By training a generative model from a classifier's signal, DISSECT offers a way to discover a classifier's inherent \"notion\" of distinct concepts automatically rather than rely on user-predefined concepts. We show that DISSECT produces CTs that (1) disentangle several concepts, (2) are influential to a classifier's decision and are coupled to its reasoning due to joint training (3), are realistic, (4) preserve relevant information, and (5) are stable across similar inputs. We validate DISSECT on several challenging synthetic and realistic datasets where previous methods fall short of satisfying desirable criteria for interpretability and show that it performs consistently well. Finally, we present experiments showing applications of DISSECT for detecting potential biases of a classifier and identifying spurious artifacts that impact predictions.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ghandeharioun|dissect_disentangled_simultaneous_explanations_via_concept_traversals", "pdf": "/pdf/8e8a8d5dafd24c9cba49d3671b2ee34d0decdecf.pdf", "one-sentence_summary": "We propose a novel counterfactual explainability method that simultaneously satisfies several desirable qualities where other methods fail by training a generator, a discriminator, and a concept disentangler using the classifier’s signal.", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 4 code implementations](https://www.catalyzex.com/paper/dissect-disentangled-simultaneous/code)", "_bibtex": "@inproceedings{\nghandeharioun2022dissect,\ntitle={{DISSECT}: Disentangled Simultaneous Explanations via Concept Traversals},\nauthor={Asma Ghandeharioun and Been Kim and Chun-Liang Li and Brendan Jou and Brian Eoff and Rosalind Picard},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=qY79G8jGsep}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "Az7opqbQE-3", "original": "H5RcWSPfvj", "number": 4163, "cdate": 1632875734805, "mdate": null, "ddate": null, "tcdate": 1632875734805, "tmdate": 1750551551578, "tddate": null, "forum": "Az7opqbQE-3", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Heteroscedastic Temporal Variational Autoencoder For Irregularly Sampled Time Series", "authorids": ["~Satya_Narayan_Shukla1", "~Benjamin_Marlin1"], "authors": ["Satya Narayan Shukla", "Benjamin Marlin"], "keywords": ["irregular sampling", "uncertainty", "imputation", "interpolation", "multivariate time series", "missing data", "variational autoencoder"], "abstract": "Irregularly sampled time series commonly occur in several domains where they present a significant 
challenge to standard deep learning models. In this paper, we propose a new deep learning framework for probabilistic interpolation of irregularly sampled time series that we call the Heteroscedastic Temporal Variational Autoencoder (HeTVAE). HeTVAE includes a novel input layer to encode information about input observation sparsity, a temporal VAE architecture to propagate uncertainty due to input sparsity, and a heteroscedastic output layer to enable variable uncertainty in the output interpolations. Our results show that the proposed architecture is better able to reflect variable uncertainty through time due to sparse and irregular sampling than a range of baseline and traditional models, as well as recently proposed deep latent variable models that use homoscedastic output layers.", "one-sentence_summary": "We present a new deep learning architecture for probabilistic interpolation of irregularly sampled time series.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "shukla|heteroscedastic_temporal_variational_autoencoder_for_irregularly_sampled_time_series", "pdf": "/pdf/4a602866528e0ae9511889c65b61991ad9ddfd8b.pdf", "supplementary_material": "", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 2 code implementations](https://www.catalyzex.com/paper/heteroscedastic-temporal-variational/code)", "_bibtex": "@inproceedings{\nshukla2022heteroscedastic,\ntitle={Heteroscedastic Temporal Variational Autoencoder For Irregularly Sampled Time Series},\nauthor={Satya Narayan Shukla and Benjamin Marlin},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Az7opqbQE-3}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "vUH85MOXO7h", "original": "Eepw3MEpF2O9", "number": 4162, "cdate": 1632875734740, "mdate": null, "ddate": null, "tcdate": 1632875734740, "tmdate": 1750551551835, "tddate": null, "forum": "vUH85MOXO7h", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "A Neural Tangent Kernel Perspective of Infinite Tree Ensembles", "authorids": ["~Ryuichi_Kanoh1", "~Mahito_Sugiyama1"], "authors": ["Ryuichi Kanoh", "Mahito Sugiyama"], "keywords": ["Neural Tangent Kernel", "Tree Ensemble", "Soft Tree"], "abstract": "In practical situations, the tree ensemble is one of the most popular models along with neural networks. A soft tree is a variant of a decision tree. Instead of using a greedy method for searching splitting rules, the soft tree is trained using a gradient method in which the entire splitting operation is formulated in a differentiable form. Although ensembles of such soft trees have been used increasingly in recent years, little theoretical work has been done to understand their behavior. By considering an ensemble of infinite soft trees, this paper introduces and studies the Tree Neural Tangent Kernel (TNTK), which provides new insights into the behavior of the infinite ensemble of soft trees. 
Using the TNTK, we theoretically identify several non-trivial properties, such as global convergence of the training, the equivalence of the oblivious tree structure, and the degeneracy of the TNTK induced by the deepening of the trees.", "one-sentence_summary": "By considering an ensemble of infinite trees, we introduce and study the Tree Neural Tangent Kernel (TNTK), which provides new insights into the behavior of the infinite ensemble of soft trees.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "kanoh|a_neural_tangent_kernel_perspective_of_infinite_tree_ensembles", "pdf": "/pdf/39b3d2b8700abc51932e7eea69ff8d0868dc2be8.pdf", "supplementary_material": "/attachment/4dbd79f04fb1a962d127105279eb63dee09e444f.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/a-neural-tangent-kernel-perspective-of/code)", "_bibtex": "@inproceedings{\nkanoh2022a,\ntitle={A Neural Tangent Kernel Perspective of Infinite Tree Ensembles},\nauthor={Ryuichi Kanoh and Mahito Sugiyama},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=vUH85MOXO7h}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 8}} +{"id": "nKWjE4QF1hB", "original": "SHRrmv-vJYeB", "number": 4161, "cdate": 1632875734675, "mdate": null, "ddate": null, "tcdate": 1632875734675, "tmdate": 1676330465603, "tddate": null, "forum": "nKWjE4QF1hB", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "AlphaZero-based Proof Cost Network to Aid Game Solving", "authorids": ["~Ti-Rong_Wu1", "~Chung-Chin_Shih2", "~Ting_Han_Wei1", "~Meng-Yu_Tsai1", "~Wei-Yuan_Hsu1", "~I-Chen_Wu3"], "authors": ["Ti-Rong Wu", "Chung-Chin Shih", "Ting Han Wei", "Meng-Yu Tsai", "Wei-Yuan Hsu", "I-Chen Wu"], "keywords": ["Monte-Carlo Tree Search", "Solving Games", "AlphaZero", "Deep Reinforcement Learning"], "abstract": "The AlphaZero algorithm learns and plays games without hand-crafted expert knowledge. However, since its objective is to play well, we hypothesize that a better objective can be defined for the related but separate task of solving games. This paper proposes a novel approach to solving problems by modifying the training target of the AlphaZero algorithm, such that it prioritizes solving the game quickly, rather than winning. We train a Proof Cost Network (PCN), where proof cost is a heuristic that estimates the amount of work required to solve problems. This matches the general concept of the so-called proof number from proof number search, which has been shown to be well-suited for game solving. We propose two specific training targets. The first finds the shortest path to a solution, while the second estimates the proof cost. We conduct experiments on solving 15x15 Gomoku and 9x9 Killall-Go problems with both MCTS-based and FDFPN solvers. 
Comparisons between using AlphaZero networks and PCN as heuristics show that PCN can solve more problems.", "one-sentence_summary": "This paper proposes a novel approach to solving problems by modifying the training target of the AlphaZero algorithm, such that it prioritizes solving the game quickly, rather than winning.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wu|alphazerobased_proof_cost_network_to_aid_game_solving", "pdf": "/pdf/b5c23474ea991857d67e3e750bb82c36a669b2e9.pdf", "supplementary_material": "/attachment/74372572d1441e09d2444d5562c08ab56db2eaa7.zip", "_bibtex": "@inproceedings{\nwu2022alphazerobased,\ntitle={AlphaZero-based Proof Cost Network to Aid Game Solving},\nauthor={Ti-Rong Wu and Chung-Chin Shih and Ting Han Wei and Meng-Yu Tsai and Wei-Yuan Hsu and I-Chen Wu},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=nKWjE4QF1hB}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 27}} +{"id": "f2lrIbGx3x7", "original": "KwNrBNQrlLs", "number": 4155, "cdate": 1632875734272, "mdate": null, "ddate": null, "tcdate": 1632875734272, "tmdate": 1750551552004, "tddate": null, "forum": "f2lrIbGx3x7", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Bayesian Framework for Gradient Leakage", "authorids": ["~Mislav_Balunovic1", "~Dimitar_Iliev_Dimitrov2", "~Robin_Staab1", "~Martin_Vechev1"], "authors": ["Mislav Balunovic", "Dimitar Iliev Dimitrov", "Robin Staab", "Martin Vechev"], "keywords": ["federated learning", "privacy", "gradient leakage"], "abstract": "Federated learning is an established method for training machine learning models without sharing training data. However, recent work has shown that it cannot guarantee data privacy as shared gradients can still leak sensitive information. To formalize the problem of gradient leakage, we propose a theoretical framework that enables, for the first time, analysis of the Bayes optimal adversary phrased as an optimization problem. We demonstrate that existing leakage attacks can be seen as approximations of this optimal adversary with different assumptions on the probability distributions of the input data and gradients. Our experiments confirm the effectiveness of the Bayes optimal adversary when it has knowledge of the underlying distribution. Further, our experimental evaluation shows that several existing heuristic defenses are not effective against stronger attacks, especially early in the training process. 
Thus, our findings indicate that the construction of more effective defenses and their evaluation remains an open problem.\n", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "balunovic|bayesian_framework_for_gradient_leakage", "pdf": "/pdf/4e51a98c83f488bc5362a078c71216dab544be00.pdf", "one-sentence_summary": "We propose a theoretical framework for analysis of the Bayes optimal adversary for gradient leakage, and perform evaluation of existing defenses.", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/bayesian-framework-for-gradient-leakage/code)", "_bibtex": "@inproceedings{\nbalunovic2022bayesian,\ntitle={Bayesian Framework for Gradient Leakage},\nauthor={Mislav Balunovic and Dimitar Iliev Dimitrov and Robin Staab and Martin Vechev},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=f2lrIbGx3x7}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 18}} +{"id": "YpPiNigTzMT", "original": "XPViQg8oD_l", "number": 4154, "cdate": 1632875734203, "mdate": null, "ddate": null, "tcdate": 1632875734203, "tmdate": 1676330465793, "tddate": null, "forum": "YpPiNigTzMT", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Universalizing Weak Supervision", "authorids": ["~Changho_Shin2", "~Winfred_Li1", "~Harit_Vishwakarma1", "~Nicholas_Carl_Roberts1", "~Frederic_Sala1"], "authors": ["Changho Shin", "Winfred Li", "Harit Vishwakarma", "Nicholas Carl Roberts", "Frederic Sala"], "keywords": ["Weak supervision"], "abstract": "Weak supervision (WS) frameworks are a popular way to bypass hand-labeling large datasets for training data-hungry models.\nThese approaches synthesize multiple noisy but cheaply-acquired estimates of labels into a set of high-quality pseudo-labels for downstream training. However, the synthesis technique is specific to a particular kind of label, such as binary labels or sequences, and each new label type requires manually designing a new synthesis algorithm. Instead, we propose a universal technique that enables weak supervision over any label type while still offering desirable properties, including practical flexibility, computational efficiency, and theoretical guarantees. We apply this technique to important problems previously not tackled by WS frameworks including learning to rank, regression, and learning in hyperbolic space. Theoretically, our synthesis approach produces a consistent estimators for learning some challenging but important generalizations of the exponential family model. 
Experimentally, we validate our framework and show improvement over baselines in diverse settings including real-world learning-to-rank and regression problems along with learning on hyperbolic manifolds.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "shin|universalizing_weak_supervision", "pdf": "/pdf/a2adc08eeb52dcddf2563c7bb42940946813b522.pdf", "one-sentence_summary": "We extend weak supervision frameworks to new settings — rankings, regression, Riemannian spaces, and more — with a universal algorithm with theoretical guarantees. ", "supplementary_material": "/attachment/fd1ee4735ecb1725affb1b44878a640320daa2d4.zip", "_bibtex": "@inproceedings{\nshin2022universalizing,\ntitle={Universalizing Weak Supervision},\nauthor={Changho Shin and Winfred Li and Harit Vishwakarma and Nicholas Carl Roberts and Frederic Sala},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=YpPiNigTzMT}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "ULfq0qR25dY", "original": "FTsVDQC3Wo9", "number": 4153, "cdate": 1632875734136, "mdate": null, "ddate": null, "tcdate": 1632875734136, "tmdate": 1750551552248, "tddate": null, "forum": "ULfq0qR25dY", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Maximum n-times Coverage for Vaccine Design", "authorids": ["~Ge_Liu2", "~Alexander_Dimitrakakis1", "~Brandon_Carter1", "~David_Gifford1"], "authors": ["Ge Liu", "Alexander Dimitrakakis", "Brandon Carter", "David Gifford"], "keywords": ["computational biology", "vaccine design", "COVID-19", "maximum n-times coverage", "combinatorial optimization", "integer linear programming"], "abstract": "We introduce the maximum $n$-times coverage problem that selects $k$ overlays to maximize the summed coverage of weighted elements, where each element must be covered at least $n$ times. We also define the min-cost $n$-times coverage problem where the objective is to select the minimum set of overlays such that the sum of the weights of elements that are covered at least $n$ times is at least $\\tau$. Maximum $n$-times coverage is a generalization of the multi-set multi-cover problem, is NP-complete, and is not submodular. We introduce two new practical solutions for $n$-times coverage based on integer linear programming and sequential greedy optimization. 
We show that maximum $n$-times coverage is a natural way to frame peptide vaccine design, and find that it produces a pan-strain COVID-19 vaccine design that is superior to 29 other published designs in predicted population coverage and the expected number of peptides displayed by each individual's HLA molecules.", "one-sentence_summary": "We introduce the maximum $n$-times coverage problem that selects $k$ overlays to maximize the summed coverage of weighted elements, where each element must be covered at least $n$ times, and show its importance for vaccine design.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "liu|maximum_ntimes_coverage_for_vaccine_design", "pdf": "/pdf/9d61f13ecd3d02a7e3ed6243e5e82f05c5f456cf.pdf", "supplementary_material": "/attachment/5e8db7a31cf8f7eef84a53079c32f0d8e8efb459.zip", "code": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/arxiv:2101.10902/code)", "_bibtex": "@inproceedings{\nliu2022maximum,\ntitle={Maximum n-times Coverage for Vaccine Design},\nauthor={Ge Liu and Alexander Dimitrakakis and Brandon Carter and David Gifford},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=ULfq0qR25dY}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "0JzqUlIVVDd", "original": "JeUCSh5Eci-6", "number": 4148, "cdate": 1632875733790, "mdate": null, "ddate": null, "tcdate": 1632875733790, "tmdate": 1750551552369, "tddate": null, "forum": "0JzqUlIVVDd", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "KL Guided Domain Adaptation", "authorids": ["~A._Tuan_Nguyen1", "~Toan_Tran1", "~Yarin_Gal1", "~Philip_Torr1", "~Atilim_Gunes_Baydin1"], "authors": ["A. Tuan Nguyen", "Toan Tran", "Yarin Gal", "Philip Torr", "Atilim Gunes Baydin"], "keywords": ["domain adaptation", "invariant representation"], "abstract": "Domain adaptation is an important problem and often needed for real-world applications. In this problem, instead of i.i.d. training and testing datapoints, we assume that the source (training) data and the target (testing) data have different distributions. With that setting, the empirical risk minimization training procedure often does not perform well, since it does not account for the change in the distribution. A common approach in the domain adaptation literature is to learn a representation of the input that has the same (marginal) distribution over the source and the target domain. However, these approaches often require additional networks and/or optimizing an adversarial (minimax) objective, which can be very expensive or unstable in practice. To improve upon these marginal alignment techniques, in this paper, we first derive a generalization bound for the target loss based on the training loss and the reverse Kullback-Leibler (KL) divergence between the source and the target representation distributions. Based on this bound, we derive an algorithm that minimizes the KL term to obtain a better generalization to the target domain. 
We show that with a probabilistic representation network, the KL term can be estimated efficiently via minibatch samples without any additional network or a minimax objective. This leads to a theoretically sound alignment method which is also very efficient and stable in practice. Experimental results also suggest that our method outperforms other representation-alignment approaches.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "nguyen|kl_guided_domain_adaptation", "pdf": "/pdf/943a05167d50e4a4de4e6c043f7c7e6374502f72.pdf", "one-sentence_summary": "We derive a generalization bound for the domain adaptation problem based on the reversed KL divergence, and propose to regularize the KL term to lower the generalization bound.", "supplementary_material": "/attachment/1db7dd3c325b238240272c2a63bfa0ca9f38dbc8.zip", "data": "", "code": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/kl-guided-domain-adaptation/code)", "_bibtex": "@inproceedings{\nnguyen2022kl,\ntitle={{KL} Guided Domain Adaptation},\nauthor={A. Tuan Nguyen and Toan Tran and Yarin Gal and Philip Torr and Atilim Gunes Baydin},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=0JzqUlIVVDd}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "Mspk_WYKoEH", "original": "v8mZaiCg9ut", "number": 4144, "cdate": 1632875733523, "mdate": null, "ddate": null, "tcdate": 1632875733523, "tmdate": 1750551552538, "tddate": null, "forum": "Mspk_WYKoEH", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "From Stars to Subgraphs: Uplifting Any GNN with Local Structure Awareness", "authorids": ["~Lingxiao_Zhao1", "~Wei_Jin4", "~Leman_Akoglu3", "~Neil_Shah2"], "authors": ["Lingxiao Zhao", "Wei Jin", "Leman Akoglu", "Neil Shah"], "keywords": ["Graph Neural Networks", "Expressiveness", "Message Passing Neural Network", "Graph Classification"], "abstract": "Message Passing Neural Networks (MPNNs) are a common type of Graph Neural Network (GNN), in which each node’s representation is computed recursively by aggregating representations (“messages”) from its immediate neighbors akin to a star-shaped pattern. MPNNs are appealing for being efficient and scalable, however their expressiveness is upper-bounded by the 1st-order Weisfeiler-Lehman isomorphism test (1-WL). In response, prior works propose highly expressive models at the cost of scalability and sometimes generalization performance. Our work stands between these two regimes: we introduce a general framework to uplift any MPNN to be more expressive, with limited scalability overhead and greatly improved practical performance. We achieve this by extending local aggregation in MPNNs from star patterns to general subgraph patterns (e.g., k-egonets): in our framework, each node representation is computed as the encoding of a surrounding induced subgraph rather than encoding of immediate neighbors only (i.e. a star). We choose the subgraph encoder to be a GNN (mainly MPNNs, considering scalability) to design a general framework that serves as a wrapper to uplift any GNN. 
We call our proposed method GNN-AK (GNN As Kernel), as the framework resembles a convolutional neural network by replacing the kernel with\nGNNs. Theoretically, we show that our framework is strictly more powerful than 1&2-WL, and is not less powerful than 3-WL. We also design subgraph sampling strategies which greatly reduce memory footprint and improve speed while maintaining performance. Our method sets new state-of-the-art performance by large margins for several well-known graph ML tasks; specifically, 0.08 MAE on ZINC,\n74.79% and 86.887% accuracy on CIFAR10 and PATTERN respectively.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "zhao|from_stars_to_subgraphs_uplifting_any_gnn_with_local_structure_awareness", "pdf": "/pdf/cc341ac588b917bee10fc4d5bb31b4a119b6108b.pdf", "code": "", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/from-stars-to-subgraphs-uplifting-any-gnn/code)", "_bibtex": "@inproceedings{\nzhao2022from,\ntitle={From Stars to Subgraphs: Uplifting Any {GNN} with Local Structure Awareness},\nauthor={Lingxiao Zhao and Wei Jin and Leman Akoglu and Neil Shah},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Mspk_WYKoEH}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 29}} +{"id": "-8sBpe7rDiV", "original": "TGWCq7it-RV", "number": 4142, "cdate": 1632875733387, "mdate": null, "ddate": null, "tcdate": 1632875733387, "tmdate": 1750551552614, "tddate": null, "forum": "-8sBpe7rDiV", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "NETWORK INSENSITIVITY TO PARAMETER NOISE VIA PARAMETER ATTACK DURING TRAINING", "authorids": ["~Julian_Büchel1", "~Fynn_Firouz_Faber1", "~Dylan_Richard_Muir1"], "authors": ["Julian Büchel", "Fynn Firouz Faber", "Dylan Richard Muir"], "keywords": ["parameter attack", "adversarial attack", "neural network", "deep learning", "optimisation", "neuromorphic processor"], "abstract": "Neuromorphic neural network processors, in the form of compute-in-memory crossbar arrays of memristors, or in the form of subthreshold analog and mixed-signal ASICs, promise enormous advantages in compute density and energy efficiency for NN-based ML tasks. However, these technologies are prone to computational non-idealities, due to process variation and intrinsic device physics. This degrades the task performance of networks deployed to the processor, by introducing parameter noise into the deployed model. While it is possible to calibrate each device, or train networks individually for each processor, these approaches are expensive and impractical for commercial deployment. Alternative methods are therefore needed to train networks that are inherently robust against parameter variation, as a consequence of network architecture and parameters. We present a new network training algorithm that attacks network parameters during training, and promotes robust performance during inference in the face of random parameter variation. Our approach introduces a loss regularization term that penalizes the susceptibility of a network to weight perturbation. 
We compare against previous approaches for producing parameter insensitivity such as dropout, weight smoothing and introducing parameter noise during training. We show that our approach produces models that are more robust to random mismatch-induced parameter variation as well as to targeted parameter variation. Our approach finds minima in flatter locations in the weight-loss landscape compared with other approaches, highlighting that the networks found by our technique are less sensitive to parameter perturbation. Our work provides an approach to deploy neural network architectures to inference devices that suffer from computational non-idealities, with minimal loss of performance. This method will enable deployment at scale to novel energy-efficient computational substrates, promoting cheaper and more prevalent edge inference.", "one-sentence_summary": "We flatten the weight loss-landscape by introducing a parameter attack term in the loss function and demonstrate improved network insensitivity to noise common in analog neuromorphic hardware.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "büchel|network_insensitivity_to_parameter_noise_via_parameter_attack_during_training", "pdf": "/pdf/b7b77ce8535702dba33084aa20eb08cae53193f4.pdf", "supplementary_material": "/attachment/7ffbeeb53e08296882cdec5ad8afc7050521ca29.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/network-insensitivity-to-parameter-noise-via/code)", "_bibtex": "@inproceedings{\nb{\\\"u}chel2022network,\ntitle={{NETWORK} {INSENSITIVITY} {TO} {PARAMETER} {NOISE} {VIA} {PARAMETER} {ATTACK} {DURING} {TRAINING}},\nauthor={Julian B{\\\"u}chel and Fynn Firouz Faber and Dylan Richard Muir},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=-8sBpe7rDiV}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "fXHl76nO2AZ", "original": "Bbkp5ZkRU5", "number": 4135, "cdate": 1632875732906, "mdate": null, "ddate": null, "tcdate": 1632875732906, "tmdate": 1676330468615, "tddate": null, "forum": "fXHl76nO2AZ", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Gradient Importance Learning for Incomplete Observations", "authorids": ["~Qitong_Gao1", "~Dong_Wang2", "~Joshua_David_Amason1", "~Siyang_Yuan1", "~Chenyang_Tao1", "~Ricardo_Henao1", "~Majda_Hadziahmetovic1", "~Lawrence_Carin2", "~Miroslav_Pajic2"], "authors": ["Qitong Gao", "Dong Wang", "Joshua David Amason", "Siyang Yuan", "Chenyang Tao", "Ricardo Henao", "Majda Hadziahmetovic", "Lawrence Carin", "Miroslav Pajic"], "keywords": ["Missing Data", "Reinforcement Learning", "Representation Learning"], "abstract": "Though recent works have developed methods that can generate estimates (or imputations) of the missing entries in a dataset to facilitate downstream analysis, most depend on assumptions that may not align with real-world applications and could suffer from poor performance in subsequent tasks such as classification. This is particularly true if the data have large missingness rates or a small sample size. 
More importantly, the imputation error could be propagated into the prediction step that follows, which may constrain the capabilities of the prediction model. In this work, we introduce the gradient importance learning (GIL) method to train multilayer perceptrons (MLPs) and long short-term memories (LSTMs) to directly perform inference from inputs containing missing values without imputation. Specifically, we employ reinforcement learning (RL) to adjust the gradients used to train these models via back-propagation. This allows the model to exploit the underlying information behind missingness patterns. We test the approach on real-world time-series (i.e., MIMIC-III), tabular data obtained from an eye clinic, and a standard dataset (i.e., MNIST), where our imputation-free predictions outperform the traditional two-step imputation-based predictions using state-of-the-art imputation methods.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "gao|gradient_importance_learning_for_incomplete_observations", "pdf": "/pdf/77f82d36ef5cbde5647d6e9f7fb7dd38ce4e2a91.pdf", "supplementary_material": "/attachment/56abd6641e0629e6045cfeb1dd02edc87f195900.zip", "_bibtex": "@inproceedings{\ngao2022gradient,\ntitle={Gradient Importance Learning for Incomplete Observations},\nauthor={Qitong Gao and Dong Wang and Joshua David Amason and Siyang Yuan and Chenyang Tao and Ricardo Henao and Majda Hadziahmetovic and Lawrence Carin and Miroslav Pajic},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=fXHl76nO2AZ}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 18}} +{"id": "v6s3HVjPerv", "original": "yWvQNY096plE", "number": 4125, "cdate": 1632875732231, "mdate": null, "ddate": null, "tcdate": 1632875732231, "tmdate": 1750551553534, "tddate": null, "forum": "v6s3HVjPerv", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Do Users Benefit From Interpretable Vision? A User Study, Baseline, And Dataset", "authorids": ["~Leon_Sixt1", "~Martin_Schuessler1", "~Oana-Iuliana_Popescu1", "philipp@itp.tu-berlin.de", "~Tim_Landgraf1"], "authors": ["Leon Sixt", "Martin Schuessler", "Oana-Iuliana Popescu", "Philipp Weiß", "Tim Landgraf"], "keywords": ["Interpretable ML", "User Study", "Human Subject Evaluation", "Invertible Neural Networks", "Convolutional Networks"], "abstract": "A variety of methods exist to explain image classification models. However, whether they provide any benefit to users over simply comparing various inputs and the model’s respective predictions remains unclear. We conducted a user study (N=240) to test how such a baseline explanation technique performs against concept-based and counterfactual explanations. To this end, we contribute a synthetic dataset generator capable of biasing individual attributes and quantifying their relevance to the model. In a study, we assess if participants can identify the relevant set of attributes compared to the ground-truth. Our results show that the baseline outperformed concept-based explanations. Counterfactual explanations from an invertible neural network performed similarly as the baseline. 
Still, they allowed users to identify some attributes more accurately. Our results highlight the importance of measuring how well users can reason about biases of a model, rather than solely relying on technical evaluations or proxy tasks. We open-source our study and dataset so it can serve as a blue-print for future studies.", "one-sentence_summary": "Do Users Benefit From Interpretable Vision? A User Study, Baseline, And Dataset", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "sixt|do_users_benefit_from_interpretable_vision_a_user_study_baseline_and_dataset", "pdf": "/pdf/49e3023b785924a7159ee756c546ac2ec523e8ea.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/do-users-benefit-from-interpretable-vision-a/code)", "_bibtex": "@inproceedings{\nsixt2022do,\ntitle={Do Users Benefit From Interpretable Vision? A User Study, Baseline, And Dataset},\nauthor={Leon Sixt and Martin Schuessler and Oana-Iuliana Popescu and Philipp Wei{\\ss} and Tim Landgraf},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=v6s3HVjPerv}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 18}} +{"id": "Qycd9j5Qp9J", "original": "quySScEQm8_", "number": 4117, "cdate": 1632875731750, "mdate": null, "ddate": null, "tcdate": 1632875731750, "tmdate": 1676330470680, "tddate": null, "forum": "Qycd9j5Qp9J", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Understanding the Variance Collapse of SVGD in High Dimensions", "authorids": ["~Jimmy_Ba1", "~Murat_A_Erdogdu1", "~Marzyeh_Ghassemi2", "~Shengyang_Sun4", "~Taiji_Suzuki1", "~Denny_Wu2", "~Tianzong_Zhang1"], "authors": ["Jimmy Ba", "Murat A Erdogdu", "Marzyeh Ghassemi", "Shengyang Sun", "Taiji Suzuki", "Denny Wu", "Tianzong Zhang"], "keywords": ["Stein Variational Gradient Descent", "Approximate Inference", "Particle-based Variational Inference"], "abstract": "Stein variational gradient descent (SVGD) is a deterministic inference algorithm that evolves a set of particles to fit a target distribution. Despite its computational efficiency, SVGD often underestimates the variance of the target distribution in high dimensions. In this work we attempt to explain the variance collapse in SVGD. On the qualitative side, we compare the SVGD update with gradient descent on the maximum mean discrepancy (MMD) objective; we observe that the variance collapse phenomenon relates to the bias from deterministic updates present in the \"driving force\" of SVGD, and empirically verify that removal of such bias leads to more accurate variance estimation. On the quantitative side, we demonstrate that the variance collapse of SVGD can be accurately predicted in the proportional asymptotic limit, i.e., when the number of particles $n$ and dimensions $d$ diverge at the same rate. 
In particular, for learning high-dimensional isotropic Gaussians, we derive the exact equilibrium variance for both SVGD and MMD-descent under certain near-orthogonality assumption on the converged particles, and confirm that SVGD suffers from the \"curse of dimensionality\".", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "ba|understanding_the_variance_collapse_of_svgd_in_high_dimensions", "pdf": "/pdf/71e77dab5447ab6226d0f2e58132575f2217dc3b.pdf", "one-sentence_summary": "Qualitative and quantitative analysis of the variance collapse phenomenon of SVGD in high dimensions. ", "_bibtex": "@inproceedings{\nba2022understanding,\ntitle={Understanding the Variance Collapse of {SVGD} in High Dimensions},\nauthor={Jimmy Ba and Murat A Erdogdu and Marzyeh Ghassemi and Shengyang Sun and Taiji Suzuki and Denny Wu and Tianzong Zhang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Qycd9j5Qp9J}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 10}} +{"id": "ZOcX-eybqoL", "original": "_ZQ6F0UmSQ", "number": 4105, "cdate": 1632875730934, "mdate": null, "ddate": null, "tcdate": 1632875730934, "tmdate": 1676330470784, "tddate": null, "forum": "ZOcX-eybqoL", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Generalisation in Lifelong Reinforcement Learning through Logical Composition ", "authorids": ["~Geraud_Nangue_Tasse1", "~Steven_James1", "~Benjamin_Rosman1"], "authors": ["Geraud Nangue Tasse", "Steven James", "Benjamin Rosman"], "keywords": ["Reinforcement Learning", "Lifelong learning", "Multi task learning", "Transfer learning", "Logical composition", "Deep Reinforcement Learning"], "abstract": "We leverage logical composition in reinforcement learning to create a framework that enables an agent to autonomously determine whether a new task can be immediately solved using its existing abilities, or whether a task-specific skill should be learned. In the latter case, the proposed algorithm also enables the agent to learn the new task faster by generating an estimate of the optimal policy. Importantly, we provide two main theoretical results: we bound the performance of the transferred policy on a new task, and we give bounds on the necessary and sufficient number of tasks that need to be learned throughout an agent's lifetime to generalise over a distribution. We verify our approach in a series of experiments, where we perform transfer learning both after learning a set of base tasks, and after learning an arbitrary set of tasks. We also demonstrate that, as a side effect of our transfer learning approach, an agent can produce an interpretable Boolean expression of its understanding of the current task. Finally, we demonstrate our approach in the full lifelong setting where an agent receives tasks from an unknown distribution. 
Starting from scratch, an agent is able to quickly generalise over the task distribution after learning only a few tasks, which are sub-logarithmic in the size of the task space.", "pdf": "/pdf/89cb79a9b9bb6a9a833a7a8ae73c8c5a87792970.pdf", "one-sentence_summary": "A framework with theoretical guarantees for an agent to quickly generalize over a task space by autonomously determining whether a new task can be solved zero-shot using existing skills, or whether a task-specific skill should be learned few-shot.", "supplementary_material": "/attachment/73712897744101e96675f8da95110567d360b743.zip", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "tasse|generalisation_in_lifelong_reinforcement_learning_through_logical_composition", "_bibtex": "@inproceedings{\ntasse2022generalisation,\ntitle={Generalisation in Lifelong Reinforcement Learning through Logical Composition },\nauthor={Geraud Nangue Tasse and Steven James and Benjamin Rosman},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=ZOcX-eybqoL}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 40}} +{"id": "gSdSJoenupI", "original": "skLlmxkeHl1", "number": 4102, "cdate": 1632875730732, "mdate": null, "ddate": null, "tcdate": 1632875730732, "tmdate": 1750551553931, "tddate": null, "forum": "gSdSJoenupI", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "PolyLoss: A Polynomial Expansion Perspective of Classification Loss Functions", "authorids": ["~Zhaoqi_Leng1", "~Mingxing_Tan3", "~Chenxi_Liu1", "~Ekin_Dogus_Cubuk1", "~Jay_Shi1", "~Shuyang_Cheng1", "~Dragomir_Anguelov1"], "authors": ["Zhaoqi Leng", "Mingxing Tan", "Chenxi Liu", "Ekin Dogus Cubuk", "Jay Shi", "Shuyang Cheng", "Dragomir Anguelov"], "keywords": ["classification", "computer vision", "loss"], "abstract": "Cross-entropy loss and focal loss are the most common choices when training deep neural networks for classification problems. Generally speaking, however, a good loss function can take on much more flexible forms, and should be tailored for different tasks and datasets. Motivated by how functions can be approximated via Taylor expansion, we propose a simple framework, named PolyLoss, to view and design loss functions as a linear combination of polynomial functions. Our PolyLoss allows the importance of different polynomial bases to be easily adjusted depending on the targeting tasks and datasets, while naturally subsuming the aforementioned cross-entropy loss and focal loss as special cases. Extensive experimental results show that the optimal choice within the PolyLoss is indeed dependent on the task and dataset. 
Simply by introducing one extra hyperparameter and adding one line of code, our Poly-1 formulation outperforms the cross-entropy loss and focal loss on 2D image classification, instance segmentation, object detection, and 3D object detection tasks, sometimes by a large margin.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "leng|polyloss_a_polynomial_expansion_perspective_of_classification_loss_functions", "pdf": "/pdf/d1430448cff98fb37273293f39735ba9c6a4313a.pdf", "one-sentence_summary": "In the PolyLoss framework, we propose a simple and effective Poly-1 formulation which outperforms the cross-entropy loss and focal loss on various of tasks.", "data": "", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/polyloss-a-polynomial-expansion-perspective/code)", "_bibtex": "@inproceedings{\nleng2022polyloss,\ntitle={PolyLoss: A Polynomial Expansion Perspective of Classification Loss Functions},\nauthor={Zhaoqi Leng and Mingxing Tan and Chenxi Liu and Ekin Dogus Cubuk and Jay Shi and Shuyang Cheng and Dragomir Anguelov},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=gSdSJoenupI}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 17}} +{"id": "I2Hw58KHp8O", "original": "xo9fxr0D6wK", "number": 4101, "cdate": 1632875730666, "mdate": null, "ddate": null, "tcdate": 1632875730666, "tmdate": 1750551553944, "tddate": null, "forum": "I2Hw58KHp8O", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Improving Non-Autoregressive Translation Models Without Distillation", "authorids": ["~Xiao_Shi_Huang1", "~Felipe_Perez1", "~Maksims_Volkovs3"], "authors": ["Xiao Shi Huang", "Felipe Perez", "Maksims Volkovs"], "keywords": ["Natural Language Processing", "Deep Learning", "Non-autoregressive Machine Translation", "Transformer", "Distillation"], "abstract": "Transformer-based autoregressive (AR) machine translation models have achieved significant performance improvements, nearing human-level accuracy on some languages. The AR framework translates one token at a time which can be time consuming, especially for long sequences. To accelerate inference, recent work has been exploring non-autoregressive (NAR) approaches that translate blocks of tokens in parallel. Despite significant progress, leading NAR models still lag behind their AR counterparts, and only become competitive when trained with distillation. In this paper we investigate possible reasons behind this performance gap, namely, the indistinguishability of tokens, and mismatch between training and inference. We then propose the Conditional Masked Language Model with Correction (CMLMC) that addresses these problems. Empirically, we show that CMLMC achieves state-of-the-art NAR performance when trained on raw data without distillation and approaches AR performance on multiple datasets. 
Full code for this work will be released at the time of publication.", "one-sentence_summary": "Improving the CMLM non-autoregressive machine translation model so it trains without knowledge distillation and achieves SOTA BLEU score on both raw and distilled dataset", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "huang|improving_nonautoregressive_translation_models_without_distillation", "pdf": "/pdf/fe5e18c9939f10295c39693c81d77b03816cad63.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 7 code implementations](https://www.catalyzex.com/paper/improving-non-autoregressive-translation/code)", "_bibtex": "@inproceedings{\nhuang2022improving,\ntitle={Improving Non-Autoregressive Translation Models Without Distillation},\nauthor={Xiao Shi Huang and Felipe Perez and Maksims Volkovs},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=I2Hw58KHp8O}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 11}} +{"id": "zzk231Ms1Ih", "original": "_vWu7gLdTrwb", "number": 4095, "cdate": 1632875730339, "mdate": null, "ddate": null, "tcdate": 1632875730339, "tmdate": 1676330471169, "tddate": null, "forum": "zzk231Ms1Ih", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "A Theory of Tournament Representations", "authorids": ["~Arun_Rajkumar4", "~Vishnu_Veerathu1", "cs20d400@smail.iitm.ac.in"], "authors": ["Arun Rajkumar", "Vishnu Veerathu", "Abdul Bakey Mir"], "keywords": ["tournament", "skew-symmetric", "pairwise ranking"], "abstract": "Real-world tournaments are almost always intransitive. Recent works have noted that parametric models which assume $d$ dimensional node representations can effectively model intransitive tournaments. However, nothing is known about the structure of the class of tournaments that arise out of any fixed $d$ dimensional representations. In this work, we develop a novel theory for understanding parametric tournament representations. Our first contribution is to structurally characterize the class of tournaments that arise out of $d$ dimensional representations. We do this by showing that these tournament classes have forbidden configurations that must necessarily be a union of flip classes, a novel way to partition the set of all tournaments. We further characterize rank $2$ tournaments completely by showing that the associated forbidden flip class contains just $2$ tournaments. Specifically, we show that the rank $2$ tournaments are equivalent to locally transitive tournaments. This insight allows us to show that the minimum feedback arc set problem on this tournament class can be solved using the standard Quicksort procedure. We also exhibit specific forbidden configurations for rank $4$ tournaments. For a general rank $d$ tournament class, we show that the flip class associated with a coned-doubly regular tournament of size $\\mathcal{O}(\\sqrt{d})$ must be a forbidden configuration. To answer a dual question, using a celebrated result of Froster, we show a lower bound of $\\Theta(\\sqrt{n})$ on the minimum dimension needed to represent all tournaments on $n$ nodes. 
For any given tournament, we show a novel upper bound on the smallest representation dimension that depends on the least size of the number of unique nodes in any feedback arc set of the flip class associated with a tournament. We show how our results also shed light on the upper bound of sign-rank of matrices. ", "one-sentence_summary": "We develop a theory to understand tournament representations i.e. structurally characterise when a tournament graph can be represented in lower dimensions using a skew symmetric matrix. ", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "rajkumar|a_theory_of_tournament_representations", "pdf": "/pdf/a7853d8c301f8a37bc858f4c428d73862dabff26.pdf", "_bibtex": "@inproceedings{\nrajkumar2022a,\ntitle={A Theory of Tournament Representations},\nauthor={Arun Rajkumar and Vishnu Veerathu and Abdul Bakey Mir},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=zzk231Ms1Ih}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "OJm3HZuj4r7", "original": "hJHbS1BIfZ71", "number": 4081, "cdate": 1632875729410, "mdate": null, "ddate": null, "tcdate": 1632875729410, "tmdate": 1676330472115, "tddate": null, "forum": "OJm3HZuj4r7", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Convergent and Efficient Deep Q Learning Algorithm", "authorids": ["~Zhikang_T._Wang1", "~Masahito_Ueda1"], "authors": ["Zhikang T. Wang", "Masahito Ueda"], "keywords": ["DQN", "reinforcement learning", "convergence"], "abstract": "Despite the empirical success of the deep Q network (DQN) reinforcement learning algorithm and its variants, DQN is still not well understood and it does not guarantee convergence. In this work, we show that DQN can indeed diverge and cease to operate in realistic settings. Although there exist gradient-based convergent methods, we show that they actually have inherent problems in learning dynamics which cause them to fail even for simple tasks. To overcome these problems, we propose a convergent DQN algorithm (C-DQN) that is guaranteed to converge and can work with large discount factors (0.9998). It learns robustly in difficult settings and can learn several difficult games in the Atari 2600 benchmark that DQN fails to solve.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wang|convergent_and_efficient_deep_q_learning_algorithm", "pdf": "/pdf/d999c3cb704da4722ea5330b5dd48600eb9c4ef4.pdf", "supplementary_material": "/attachment/7414afa6296afdedebef45a3e500076a47622090.zip", "_bibtex": "@inproceedings{\nwang2022convergent,\ntitle={Convergent and Efficient Deep Q Learning Algorithm},\nauthor={Zhikang T. 
Wang and Masahito Ueda},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=OJm3HZuj4r7}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "TXsjU8BaibT", "original": "7zqBGZ_qFoV", "number": 4078, "cdate": 1632875729209, "mdate": null, "ddate": null, "tcdate": 1632875729209, "tmdate": 1750551554475, "tddate": null, "forum": "TXsjU8BaibT", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Trigger Hunting with a Topological Prior for Trojan Detection", "authorids": ["~Xiaoling_Hu1", "~Xiao_Lin2", "~Michael_Cogswell1", "~Yi_Yao1", "~Susmit_Jha1", "~Chao_Chen1"], "authors": ["Xiaoling Hu", "Xiao Lin", "Michael Cogswell", "Yi Yao", "Susmit Jha", "Chao Chen"], "keywords": ["Trojan detection", "diversity loss", "topological prior"], "abstract": "Despite their success and popularity, deep neural networks (DNNs) are vulnerable when facing backdoor attacks. This impedes their wider adoption, especially in mission critical applications. This paper tackles the problem of Trojan detection, namely, identifying Trojaned models – models trained with poisoned data. One popular approach is reverse engineering, i.e., recovering the triggers on a clean image by manipulating the model’s prediction. One major challenge of reverse engineering approach is the enormous search space of triggers. To this end, we propose innovative priors such as diversity and topological simplicity to not only increase the chances of finding the appropriate triggers but also improve the quality of the found triggers. Moreover, by encouraging a diverse set of trigger candidates, our method can perform effectively in cases with unknown target labels. 
We demonstrate that these priors can significantly improve the quality of the recovered triggers, resulting in substantially improved Trojan detection accuracy as validated on both synthetic and publicly available TrojAI benchmarks.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "hu|trigger_hunting_with_a_topological_prior_for_trojan_detection", "pdf": "/pdf/4db1d42d467c296c5ec7fa3f38e37dcb5c140e84.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 7 code implementations](https://www.catalyzex.com/paper/trigger-hunting-with-a-topological-prior-for/code)", "_bibtex": "@inproceedings{\nhu2022trigger,\ntitle={Trigger Hunting with a Topological Prior for Trojan Detection},\nauthor={Xiaoling Hu and Xiao Lin and Michael Cogswell and Yi Yao and Susmit Jha and Chao Chen},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=TXsjU8BaibT}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 16}} +{"id": "JM2kFbJvvI", "original": "yZON77tdmCqr", "number": 4077, "cdate": 1632875729145, "mdate": null, "ddate": null, "tcdate": 1632875729145, "tmdate": 1750551554501, "tddate": null, "forum": "JM2kFbJvvI", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Who Is the Strongest Enemy? Towards Optimal and Efficient Evasion Attacks in Deep RL", "authorids": ["~Yanchao_Sun1", "~Ruijie_Zheng1", "~Yongyuan_Liang1", "~Furong_Huang1"], "authors": ["Yanchao Sun", "Ruijie Zheng", "Yongyuan Liang", "Furong Huang"], "keywords": ["adversarial RL", "robustness of RL", "evasion attack", "optimal attack", "observation perturbation"], "abstract": "Evaluating the worst-case performance of a reinforcement learning (RL) agent under the strongest/optimal adversarial perturbations on state observations (within some constraints) is crucial for understanding the robustness of RL agents. However, finding the optimal adversary is challenging, in terms of both whether we can find the optimal attack and how efficiently we can find it. Existing works on adversarial RL either use heuristics-based methods that may not find the strongest adversary, or directly train an RL-based adversary by treating the agent as a part of the environment, which can find the optimal adversary but may become intractable in a large state space. \nThis paper introduces a novel attacking method to find the optimal attacks through collaboration between a designed function named \"actor\" and an RL-based learner named \"director\". The actor crafts state perturbations for a given policy perturbation direction, and the director learns to propose the best policy perturbation directions. Our proposed algorithm, PA-AD, is theoretically optimal and significantly more efficient than prior RL-based works in environments with large state spaces. Empirical results show that our proposed PA-AD universally outperforms state-of-the-art attacking methods in various Atari and MuJoCo environments. 
By applying PA-AD to adversarial training, we achieve state-of-the-art empirical robustness in multiple tasks under strong adversaries.", "one-sentence_summary": "We theoretically characterize the essence of evasion attacks in RL, and propose a novel attack algorithm for RL agents, which achieves state-of-the-art performance on both attacking and robustifying RL agents in many Atari and MuJoCo tasks.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "sun|who_is_the_strongest_enemy_towards_optimal_and_efficient_evasion_attacks_in_deep_rl", "pdf": "/pdf/b11335ea1d1d4ca95531723261e11735e0550bc4.pdf", "supplementary_material": "/attachment/63f167f8a582b919bb82474e697ab07a0d31207c.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/who-is-the-strongest-enemy-towards-optimal/code)", "_bibtex": "@inproceedings{\nsun2022who,\ntitle={Who Is the Strongest Enemy? Towards Optimal and Efficient Evasion Attacks in Deep {RL}},\nauthor={Yanchao Sun and Ruijie Zheng and Yongyuan Liang and Furong Huang},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=JM2kFbJvvI}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 18}} +{"id": "v3aeIsY_vVX", "original": "ANrAFgYWZLV", "number": 4068, "cdate": 1632875728565, "mdate": null, "ddate": null, "tcdate": 1632875728565, "tmdate": 1750551554848, "tddate": null, "forum": "v3aeIsY_vVX", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Chunked Autoregressive GAN for Conditional Waveform Synthesis", "authorids": ["~Max_Morrison2", "~Rithesh_Kumar1", "~Kundan_Kumar1", "~Prem_Seetharaman1", "~Aaron_Courville3", "~Yoshua_Bengio1"], "authors": ["Max Morrison", "Rithesh Kumar", "Kundan Kumar", "Prem Seetharaman", "Aaron Courville", "Yoshua Bengio"], "keywords": ["audio generation", "speech synthesis", "deep learning", "generative models", "autoregression", "generative adversarial networks"], "abstract": "Conditional waveform synthesis models learn a distribution of audio waveforms given conditioning such as text, mel-spectrograms, or MIDI. These systems employ deep generative models that model the waveform via either sequential (autoregressive) or parallel (non-autoregressive) sampling. Generative adversarial networks (GANs) have become a common choice for non-autoregressive waveform synthesis. However, state-of-the-art GAN-based models produce artifacts when performing mel-spectrogram inversion. In this paper, we demonstrate that these artifacts correspond with an inability for the generator to learn accurate pitch and periodicity. We show that simple pitch and periodicity conditioning is insufficient for reducing this error relative to using autoregression. We discuss the inductive bias that autoregression provides for learning the relationship between instantaneous frequency and phase, and show that this inductive bias holds even when autoregressively sampling large chunks of the waveform during each forward pass. 
Relative to prior state-of-the-art GAN-based models, our proposed model, Chunked Autoregressive GAN (CARGAN) reduces pitch error by 40-60%, reduces training time by 58%, maintains a fast inference speed suitable for real-time or interactive applications, and maintains or improves subjective quality.", "one-sentence_summary": "We improve the state-of-the-art of conditional waveform synthesis by combining the strengths of GANs and autoregression", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "morrison|chunked_autoregressive_gan_for_conditional_waveform_synthesis", "pdf": "/pdf/070239829c83980ec499e2eff346d48eafe3ecb5.pdf", "supplementary_material": "/attachment/ec88c411918d8107aad13556f6a935a5a5d76484.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 1 code implementation](https://www.catalyzex.com/paper/chunked-autoregressive-gan-for-conditional/code)", "_bibtex": "@inproceedings{\nmorrison2022chunked,\ntitle={Chunked Autoregressive {GAN} for Conditional Waveform Synthesis},\nauthor={Max Morrison and Rithesh Kumar and Kundan Kumar and Prem Seetharaman and Aaron Courville and Yoshua Bengio},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=v3aeIsY_vVX}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 14}} +{"id": "psh0oeMSBiF", "original": "nOFa1iJCGkS", "number": 4064, "cdate": 1632875728292, "mdate": null, "ddate": null, "tcdate": 1632875728292, "tmdate": 1750551554884, "tddate": null, "forum": "psh0oeMSBiF", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "COPA: Certifying Robust Policies for Offline Reinforcement Learning against Poisoning Attacks", "authorids": ["~Fan_Wu6", "~Linyi_Li1", "~Huan_Zhang1", "~Bhavya_Kailkhura1", "~Krishnaram_Kenthapadi1", "~Ding_Zhao1", "~Bo_Li19"], "authors": ["Fan Wu", "Linyi Li", "Huan Zhang", "Bhavya Kailkhura", "Krishnaram Kenthapadi", "Ding Zhao", "Bo Li"], "keywords": ["certified robustness", "poisoning attacks", "reinforcement learning"], "abstract": "As reinforcement learning (RL) has achieved near human-level performance in a variety of tasks, its robustness has raised great attention. While a vast body of research has explored test-time (evasion) attacks in RL and corresponding defenses, its robustness against training-time (poisoning) attacks remains largely unanswered. In this work, we focus on certifying the robustness of offline RL in the presence of poisoning attacks, where a subset of training trajectories could be arbitrarily manipulated. We propose the first certification framework, COPA, to certify the number of poisoning trajectories that can be tolerated regarding different certification criteria. Given the complex structure of RL, we propose two certification criteria: per-state action stability and cumulative reward bound. To further improve the certification, we propose new partition and aggregation protocols to train robust policies. We further prove that some of the proposed certification methods are theoretically tight and some are NP-Complete problems. 
We leverage COPA to certify three RL environments trained with different algorithms and conclude: (1) The proposed robust aggregation protocols such as temporal aggregation can significantly improve the certifications; (2) Our certifications for both per-state action stability and cumulative reward bound are efficient and tight; (3) The certifications for different training algorithms and environments are different, implying their intrinsic robustness properties. All experimental results are available at https://copa-leaderboard.github.io.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "wu|copa_certifying_robust_policies_for_offline_reinforcement_learning_against_poisoning_attacks", "pdf": "/pdf/0a24a116cb24a1e99cd715566dae243e36472472.pdf", "one-sentence_summary": "We propose the first framework for certifying robustness of offline reinforcement learning against poisoning attacks.", "supplementary_material": "/attachment/1385e87d991dcb64b971334c8597727cd87c2d4c.zip", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/copa-certifying-robust-policies-for-offline/code)", "_bibtex": "@inproceedings{\nwu2022copa,\ntitle={{COPA}: Certifying Robust Policies for Offline Reinforcement Learning against Poisoning Attacks},\nauthor={Fan Wu and Linyi Li and Huan Zhang and Bhavya Kailkhura and Krishnaram Kenthapadi and Ding Zhao and Bo Li},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=psh0oeMSBiF}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 28}} +{"id": "Vzh1BFUCiIX", "original": "V_lMjeui4-2s", "number": 4056, "cdate": 1632875727766, "mdate": null, "ddate": null, "tcdate": 1632875727766, "tmdate": 1750551555270, "tddate": null, "forum": "Vzh1BFUCiIX", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "ExT5: Towards Extreme Multi-Task Scaling for Transfer Learning", "authorids": ["~Vamsi_Aribandi1", "~Yi_Tay1", "~Tal_Schuster1", "~Jinfeng_Rao2", "~Huaixiu_Steven_Zheng1", "~Sanket_Vaibhav_Mehta2", "~Honglei_Zhuang1", "~Vinh_Q._Tran1", "~Dara_Bahri1", "~Jianmo_Ni2", "~Jai_Gupta1", "~Kai_Hui1", "~Sebastian_Ruder2", "~Donald_Metzler1"], "authors": ["Vamsi Aribandi", "Yi Tay", "Tal Schuster", "Jinfeng Rao", "Huaixiu Steven Zheng", "Sanket Vaibhav Mehta", "Honglei Zhuang", "Vinh Q. Tran", "Dara Bahri", "Jianmo Ni", "Jai Gupta", "Kai Hui", "Sebastian Ruder", "Donald Metzler"], "keywords": ["Natural Language Processing", "Transfer Learning", "Multi-task Learning"], "abstract": "Despite the recent success of multi-task learning and transfer learning for natural language processing (NLP), few works have systematically studied the effect of scaling up the number of tasks during pre-training. Towards this goal, this paper introduces ExMix (Extreme Mixture): a massive collection of 107 supervised NLP tasks across diverse domains and task-families. Using ExMix, we study the effect of multi-task pre-training at the largest scale to date, and analyze co-training transfer amongst common families of tasks. 
Through this analysis, we show that manually curating an ideal set of tasks for multi-task pre-training is not straightforward, and that multi-task scaling can vastly improve models on its own. Finally, we propose ExT5: a model pre-trained using a multi-task objective of self-supervised span denoising and supervised ExMix. Via extensive experiments, we show that ExT5 outperforms strong T5 baselines on SuperGLUE, GEM, Rainbow, Closed-Book QA tasks, and several tasks outside of ExMix. ExT5 also significantly improves sample efficiency while pre-training.", "one-sentence_summary": "Using a suite of 107 NLP tasks, we show that massively multi-task pre-training can improve downstream performance on NLP tasks, overcoming trends of negative transfer between tasks while fine-tuning.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "aribandi|ext5_towards_extreme_multitask_scaling_for_transfer_learning", "pdf": "/pdf/b64da5c159b90bf56d174fc67459b74928711232.pdf", "community_implementations": "[![CatalyzeX](/images/catalyzex_icon.svg) 3 code implementations](https://www.catalyzex.com/paper/ext5-towards-extreme-multi-task-scaling-for/code)", "_bibtex": "@inproceedings{\naribandi2022ext,\ntitle={ExT5: Towards Extreme Multi-Task Scaling for Transfer Learning},\nauthor={Vamsi Aribandi and Yi Tay and Tal Schuster and Jinfeng Rao and Huaixiu Steven Zheng and Sanket Vaibhav Mehta and Honglei Zhuang and Vinh Q. Tran and Dara Bahri and Jianmo Ni and Jai Gupta and Kai Hui and Sebastian Ruder and Donald Metzler},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=Vzh1BFUCiIX}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "gRCCdgpVZf", "original": "oIfWJ9VtG-c", "number": 4049, "cdate": 1632875727301, "mdate": null, "ddate": null, "tcdate": 1632875727301, "tmdate": 1676330473704, "tddate": null, "forum": "gRCCdgpVZf", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Provable Adaptation across Multiway Domains via Representation Learning", "authorids": ["~Zhili_Feng1", "~Shaobo_Han1", "~Simon_Shaolei_Du1"], "authors": ["Zhili Feng", "Shaobo Han", "Simon Shaolei Du"], "keywords": ["Representation learning", "tensor", "statistical learning theory"], "abstract": "This paper studies zero-shot domain adaptation where each domain is indexed on a multi-dimensional array, and we only have data from a small subset of domains. Our goal is to produce predictors that perform well on \\emph{unseen} domains. We propose a model which consists of a domain-invariant latent representation layer and a domain-specific linear prediction layer with a low-rank tensor structure. Theoretically, we present explicit sample complexity bounds to characterize the prediction error on unseen domains in terms of the number of domains with training data and the number of data per domain. To our knowledge, this is the first finite-sample guarantee for zero-shot domain adaptation. 
In addition, we provide experiments on two-way MNIST and four-way fiber sensing datasets to demonstrate the effectiveness of our proposed model.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "feng|provable_adaptation_across_multiway_domains_via_representation_learning", "pdf": "/pdf/097cce8a39240bc2a614483e1cb4e0314237f10a.pdf", "supplementary_material": "", "_bibtex": "@inproceedings{\nfeng2022provable,\ntitle={Provable Adaptation across Multiway Domains via Representation Learning},\nauthor={Zhili Feng and Shaobo Han and Simon Shaolei Du},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=gRCCdgpVZf}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 12}} +{"id": "EXHG-A3jlM", "original": "UjHsBPC4-Fl", "number": 4047, "cdate": 1632875727165, "mdate": null, "ddate": null, "tcdate": 1632875727165, "tmdate": 1676330473752, "tddate": null, "forum": "EXHG-A3jlM", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Efficient Token Mixing for Transformers via Adaptive Fourier Neural Operators", "authorids": ["~John_Guibas1", "~Morteza_Mardani1", "~Zongyi_Li1", "~Andrew_Tao1", "~Anima_Anandkumar1", "~Bryan_Catanzaro1"], "authors": ["John Guibas", "Morteza Mardani", "Zongyi Li", "Andrew Tao", "Anima Anandkumar", "Bryan Catanzaro"], "keywords": ["self attention", "linear complexity", "high-resolution inputs", "operator learning", "Fourier transform"], "abstract": "Vision transformers have delivered tremendous success in representation learning. This is primarily due to effective token mixing through self attention. However, this scales quadratically with the number of pixels, which becomes infeasible for high-resolution inputs. To cope with this challenge, we propose Adaptive Fourier Neural Operator (AFNO) as an efficient token mixer that learns to mix in the Fourier domain. AFNO is based on a principled foundation of operator learning which allows us to frame token mixing as a continuous global convolution without any dependence on the input resolution. This principle was previously used to design FNO, which solves global convolution efficiently in the Fourier domain and has shown promise in learning challenging PDEs. To handle challenges in visual representation learning such as discontinuities in images and high resolution inputs, we propose principled architectural modifications to FNO which result in memory and computational efficiency. This includes imposing a block-diagonal structure on the channel mixing weights, adaptively sharing weights across tokens, and sparsifying the frequency modes via soft-thresholding and shrinkage. The resulting model is highly parallel with a quasi-linear complexity and has linear memory in the sequence size. AFNO outperforms self-attention mechanisms for few-shot segmentation in terms of both efficiency and accuracy. 
For Cityscapes segmentation with the Segformer-B3 backbone, AFNO can handle a sequence size of 65k and outperforms other efficient self-attention mechanisms.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "guibas|efficient_token_mixing_for_transformers_via_adaptive_fourier_neural_operators", "pdf": "/pdf/bec7c123720932f2545dfb12e85bab8ac5cca6ff.pdf", "one-sentence_summary": "We propose Adaptive Fourier Neural Operators (AFNO) for scaling self-attention to high resolution images in vision transformers by establishing a link between operator learning and token mixing.", "data": "", "_bibtex": "@inproceedings{\nguibas2022efficient,\ntitle={Efficient Token Mixing for Transformers via Adaptive Fourier Neural Operators},\nauthor={John Guibas and Morteza Mardani and Zongyi Li and Andrew Tao and Anima Anandkumar and Bryan Catanzaro},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=EXHG-A3jlM}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 8}} +{"id": "xENf4QUL4LW", "original": "MwCqS8r0UFE", "number": 4042, "cdate": 1632875726834, "mdate": null, "ddate": null, "tcdate": 1632875726834, "tmdate": 1676330474073, "tddate": null, "forum": "xENf4QUL4LW", "replyto": null, "invitation": "ICLR.cc/2022/Conference/-/Blind_Submission", "content": {"title": "Sample Selection with Uncertainty of Losses for Learning with Noisy Labels", "authorids": ["~Xiaobo_Xia1", "~Tongliang_Liu1", "~Bo_Han1", "~Mingming_Gong1", "~Jun_Yu3", "~Gang_Niu1", "~Masashi_Sugiyama1"], "authors": ["Xiaobo Xia", "Tongliang Liu", "Bo Han", "Mingming Gong", "Jun Yu", "Gang Niu", "Masashi Sugiyama"], "keywords": ["Learning with noisy labels", "Sample selection", "Uncertainty"], "abstract": "In learning with noisy labels, the sample selection approach is very popular, which regards small-loss data as correctly labeled data during training. However, losses are generated on-the-fly based on the model being trained with noisy labels, and thus large-loss data are likely but not certain to be incorrect. There are actually two possibilities of a large-loss data point: (a) it is mislabeled, and then its loss decreases slower than other data, since deep neural networks learn patterns first; (b) it belongs to an underrepresented group of data and has not been selected yet. In this paper, we incorporate the uncertainty of losses by adopting interval estimation instead of point estimation of losses, where lower bounds of the confidence intervals of losses derived from distribution-free concentration inequalities, but not losses themselves, are used for sample selection. In this way, we also give large-loss but less selected data a try; then, we can better distinguish between the cases (a) and (b) by seeing if the losses effectively decrease with the uncertainty after the try. As a result, we can better explore underrepresented data that are correctly labeled but seem to be mislabeled at first glance. 
Experiments demonstrate that the proposed method is superior to baselines and robust to a broad range of label noise types.", "code_of_ethics": "", "submission_guidelines": "", "resubmission": "", "student_author": "", "serve_as_reviewer": "", "paperhash": "xia|sample_selection_with_uncertainty_of_losses_for_learning_with_noisy_labels", "pdf": "/pdf/0ebab5bba4b36eec025abfd2e21f947e05d6e662.pdf", "supplementary_material": "/attachment/971d5ae740f63bbe9d8adfd2be565cc003f7fc0a.zip", "_bibtex": "@inproceedings{\nxia2022sample,\ntitle={Sample Selection with Uncertainty of Losses for Learning with Noisy Labels},\nauthor={Xiaobo Xia and Tongliang Liu and Bo Han and Mingming Gong and Jun Yu and Gang Niu and Masashi Sugiyama},\nbooktitle={International Conference on Learning Representations},\nyear={2022},\nurl={https://openreview.net/forum?id=xENf4QUL4LW}\n}", "venue": "ICLR 2022 Poster", "venueid": "ICLR.cc/2022/Conference"}, "signatures": ["ICLR.cc/2022/Conference"], "readers": ["everyone"], "nonreaders": [], "writers": ["ICLR.cc/2022/Conference"], "pdate": 1643407560000, "odate": 1633539600000, "details": {"replyCount": 25}}