@article {3416, title = {SimuQ: A Framework for Programming Quantum Hamiltonian Simulation with Analog Compilation}, journal = {Proceedings of the ACM on Programming Languages}, volume = {8}, year = {2024}, month = {11/19/2023}, pages = {2425--2455}, abstract = {
Quantum Hamiltonian simulation, which simulates the evolution of quantum systems and probes quantum phenomena, is one of the most promising applications of quantum computing. Recent experimental results suggest that Hamiltonian-oriented analog quantum simulation would be advantageous over circuit-oriented digital quantum simulation in the Noisy Intermediate-Scale Quantum (NISQ) machine era. However, programming analog quantum simulators is much more challenging due to the lack of a unified interface between hardware and software. In this paper, we design and implement SimuQ, the first framework for quantum Hamiltonian simulation that supports Hamiltonian programming and pulse-level compilation to heterogeneous analog quantum simulators. Specifically, in SimuQ, front-end users specify the target quantum system with Hamiltonian Modeling Language, and the Hamiltonian-level programmability of analog quantum simulators is specified through a new abstraction called the abstract analog instruction set (AAIS) and programmed in AAIS Specification Language by hardware providers. Through a solver-based compilation, SimuQ generates executable pulse schedules for real devices to simulate the evolution of desired quantum systems, which is demonstrated on superconducting (IBM), neutral-atom (QuEra), and trapped-ion (IonQ) quantum devices. Moreover, we demonstrate the advantages of exposing the Hamiltonian-level programmability of devices with native operations or interaction-based gates and establish a small benchmark of quantum simulation to evaluate SimuQ\&$\#$39;s compiler with the above analog quantum simulators.
}, issn = {2475-1421}, doi = {10.1145/3632923}, url = {https://arxiv.org/abs/2303.02775}, author = {Peng, Yuxiang and Young, Jacob and Liu, Pengyu and Wu, Xiaodi} } @article {3264, title = {Self-dual quasiperiodic percolation}, journal = {Phys. Rev. E}, volume = {107}, year = {2023}, month = {2/27/2023}, pages = {024137}, abstract = {How does the percolation transition behave in the absence of quenched randomness? To address this question, we study two nonrandom self-dual quasiperiodic models of square-lattice bond percolation. In both models, the critical point has emergent discrete scale invariance, but none of the additional emergent conformal symmetry of critical random percolation. From the discrete sequences of critical clusters, we find fractal dimensions of Df=1.911943(1) and Df=1.707234(40) for the two models, significantly different from Df=91/48=1.89583... of random percolation. The critical exponents ν, determined through a numerical study of cluster sizes and wrapping probabilities on a torus, are also well below the ν=4/3 of random percolation. While these new models do not appear to belong to a universality class, they demonstrate how the removal of randomness can fundamentally change the critical behavior.
}, doi = {10.1103/PhysRevE.107.024137}, url = {https://arxiv.org/abs/2206.11290}, author = {Sommers, Grace M. and Gullans, Michael J. and Huse, David A.} } @article {2859, title = {Shadow process tomography of quantum channels}, journal = {Phys. Rev. A}, volume = {107}, year = {2023}, month = {4/4/2023}, abstract = {Quantum process tomography is a critical capability for building quantum computers, enabling quantum networks, and understanding quantum sensors. Like quantum state tomography, the process tomography of an arbitrary quantum channel requires a number of measurements that scale exponentially in the number of quantum bits affected. However, the recent field of shadow tomography, applied to quantum states, has demonstrated the ability to extract key information about a state with only polynomially many measurements. In this work, we apply the concepts of shadow state tomography to the challenge of characterizing quantum processes. We make use of the Choi isomorphism to directly apply rigorous bounds from shadow state tomography to shadow process tomography, and we find additional bounds on the number of measurements that are unique to process tomography. Our results, which include algorithms for implementing shadow process tomography enable new techniques including evaluation of channel concatenation and the application of channels to shadows of quantum states. This provides a dramatic improvement for understanding large-scale quantum systems.
}, doi = {10.1103/PhysRevA.107.042403}, url = {https://arxiv.org/abs/2110.03629}, author = {Jonathan Kunjummen and Minh C. Tran and Daniel Carney and Jacob M. Taylor} } @article {3312, title = {A sharp phase transition in linear cross-entropy benchmarking}, year = {2023}, month = {5/8/2023}, abstract = {Demonstrations of quantum computational advantage and benchmarks of quantum processors via quantum random circuit sampling are based on evaluating the linear cross-entropy benchmark (XEB). A key question in the theory of XEB is whether it approximates the fidelity of the quantum state preparation. Previous works have shown that the XEB generically approximates the fidelity in a regime where the noise rate per qudit ε satisfies εN<<1 for a system of N qudits and that this approximation breaks down at large noise rates. Here, we show that the breakdown of XEB as a fidelity proxy occurs as a sharp phase transition at a critical value of εN that depends on the circuit architecture and properties of the two-qubit gates, including in particular their entangling power. We study the phase transition using a mapping of average two-copy quantities to statistical mechanics models in random quantum circuit architectures with full or one-dimensional connectivity. We explain the phase transition behavior in terms of spectral properties of the transfer matrix of the statistical mechanics model and identify two-qubit gate sets that exhibit the largest noise robustness.
}, url = {https://arxiv.org/abs/2305.04954}, author = {Brayden Ware and Abhinav Deshpande and Dominik Hangleiter and Pradeep Niroula and Bill Fefferman and Alexey V. Gorshkov and Michael J. Gullans} } @article {3249, title = {SimuQ: A Domain-Specific Language For Quantum Simulation With Analog Compilation}, year = {2023}, month = {3/5/2023}, abstract = {Hamiltonian simulation is one of the most promising applications of quantum computing. Recent experimental results suggest that continuous-time analog quantum simulation would be advantageous over gate-based digital quantum simulation in the Noisy Intermediate-Size Quantum (NISQ) machine era. However, programming such analog quantum simulators is much more challenging due to the lack of a unified interface between hardware and software, and the only few known examples are all hardware-specific. In this paper, we design and implement SimuQ, the first domain-specific language for Hamiltonian simulation that supports pulse-level compilation to heterogeneous analog quantum simulators. Specifically, in SimuQ, front-end users will specify the target Hamiltonian evolution with a Hamiltonian modeling language, and the programmability of analog simulators is specified through a new abstraction called the abstract analog instruction set by hardware providers. Through a solver-based compilation, SimuQ will generate the pulse-level instruction schedule on the target analog simulator for the desired Hamiltonian evolution, which has been demonstrated on pulse-controlled superconducting (Qiskit Pulse) and neutral-atom (QuEra Bloqade) quantum systems, as well as on normal circuit-based digital quantum machines. Moreover, we also demonstrate the advantage of analog compilation over digital compilation on IBM machines, the use of SimuQ for resource estimation for hypothetical machines, and a scalability test of SimuQ\&$\#$39;s compilation.
}, url = {https://arxiv.org/abs/2303.02775}, author = {Yuxiang Peng and Jacob Young and Pengyu Liu and Xiaodi Wu} } @article {3255, title = {Spin-selective strong light-matter coupling in a 2D hole gas-microcavity system}, year = {2023}, month = {2/12/2023}, abstract = {The interplay between time-reversal symmetry breaking and strong light-matter coupling in 2D gases brings intriguing aspects to polariton physics. This combination can lead to polarization/spin selective light-matter interaction in the strong coupling regime. In this work, we report such a selective strong light-matter interaction by harnessing a 2D gas in the quantum Hall regime coupled to a microcavity. Specifically, we demonstrate circular-polarization dependence of the vacuum Rabi splitting, as a function of magnetic field and hole density. We provide a quantitative understanding of the phenomenon by modeling the coupling of optical transitions between Landau levels to the microcavity. This method introduces a control tool over the spin degree of freedom in polaritonic semiconductor systems, paving the way for new experimental possibilities in light-matter hybrids.
}, url = {https://arxiv.org/abs/2302.06023}, author = {Daniel G. Suarez-Forero and Deric Weston Session and Mahmoud Jalali Mehrabad and Patrick Knuppel and Stefan Faelt and Werner Wegscheider and Mohammad Hafezi} } @article {3260, title = {On the stability of solutions to Schr{\"o}dinger{\textquoteright}s equation short of the adiabatic limit}, year = {2023}, month = {3/23/2023}, abstract = {We prove an adiabatic theorem that applies at timescales short of the adiabatic limit. Our proof analyzes the stability of solutions to Schrodinger{\textquoteright}s equation under perturbation. We directly characterize cross-subspace effects of perturbation, which are typically significantly less than suggested by the perturbation{\textquoteright}s operator norm. This stability has numerous consequences: we can (1) find timescales where the solution of Schrodinger{\textquoteright}s equation converges to the ground state of a block, (2) lower bound the convergence to the global ground state by demonstrating convergence to some other known quantum state, (3) guarantee faster convergence than the standard adiabatic theorem when the ground state of the perturbed Hamiltonian (H) is close to that of the unperturbed H, and (4) bound tunneling effects in terms of the global spectral gap when H is {\textquoteleft}{\textquoteleft}stoquastic{\textquoteright}{\textquoteright} (a Z-matrix). Our results apply to quantum annealing protocols with faster convergence than usually guaranteed by a standard adiabatic theorem. Our upper and lower bounds demonstrate that at timescales short of the adiabatic limit, subspace dynamics can dominate over global dynamics. Thus, we see that convergence to particular target states can be understood as the result of otherwise local dynamics.
}, url = {https://arxiv.org/abs/2303.13478}, author = {Jacob Bringewatt and Michael Jarret and T. C. Mooney} } @article {3357, title = {Streaming quantum state purification}, year = {2023}, month = {9/28/2023}, abstract = {Quantum state purification is the task of recovering a nearly pure copy of an unknown pure quantum state using multiple noisy copies of the state. This basic task has applications to quantum communication over noisy channels and quantum computation with imperfect devices, but has only been studied previously for the case of qubits. We derive an efficient purification procedure based on the swap test for qudits of any dimension, starting with any initial error parameter. Treating the initial error parameter and the dimension as constants, we show that our procedure has sample complexity asymptotically optimal in the final error parameter. Our protocol has a simple recursive structure that can be applied when the states are provided one at a time in a streaming fashion, requiring only a small quantum memory to implement.
}, url = {https://arxiv.org/abs/2309.16387}, author = {Andrew M. Childs and Honghao Fu and Debbie Leung and Zhi Li and Maris Ozols and Vedang Vyas} } @article {3266, title = {Strongly incoherent gravity}, year = {2023}, month = {1/20/2023}, abstract = {While most fundamental interactions in nature are known to be mediated by quantized fields, the possibility has been raised that gravity may behave differently. Making this concept precise enough to test requires consistent models. Here we construct an explicit example of a theory where a non-entangling version of an arbitrary two-body potential V(r) arises from local measurements and feedback forces. While a variety of such theories exist, our construction causes particularly strong decoherence compared to more subtle approaches. Regardless, expectation values of observables obey the usual classical dynamics, while the interaction generates no entanglement. Applied to the Newtonian potential, this produces a non-relativistic model of gravity with fundamental loss of unitarity. The model contains a pair of free parameters, a substantial range of which is not excluded by observations to date. As an alternative to testing entanglement properties, we show that the entire remaining parameter space can be tested by looking for loss of quantum coherence in small systems like atom interferometers coupled to oscillating source masses.
}, url = {https://arxiv.org/abs/2301.08378}, author = {Daniel Carney and Jacob M. Taylor} } @article {3403, title = {Subsystem CSS codes, a tighter stabilizer-to-CSS mapping, and Goursat{\textquoteright}s Lemma}, year = {2023}, month = {11/29/2023}, abstract = {The CSS code construction is a powerful framework used to express features of a quantum code in terms of a pair of underlying classical codes. Its subsystem extension allows for similar expressions, but the general case has not been fully explored. Extending previous work of Aly et al. [quant-ph/0610153], we determine subsystem CSS code parameters, express codewords, and develop a Steane-type decoder using only data from the two underlying classical codes. We show that any subsystem stabilizer code can be {\textquoteleft}{\textquoteleft}doubled{\textquoteright}{\textquoteright} to yield a subsystem CSS code with twice the number of physical, logical, and gauge qudits and up to twice the code distance. This mapping preserves locality and is tighter than the Majorana-based mapping of Bravyi, Leemhuis, and Terhal [New J. Phys. 12 083039 (2010)]. Using Goursat{\textquoteright}s Lemma, we show that every subsystem stabilizer code can be constructed from two nested subsystem CSS codes satisfying certain constraints, and we characterize subsystem stabilizer codes based on the nested codes{\textquoteright} properties.
}, url = {https://arxiv.org/abs/2311.18003}, author = {Michael Liaofan Liu and Nathanan Tantivasadakarn and Victor V. Albert} } @article {3254, title = {Symphony: Expressive Secure Multiparty Computation with Coordination}, journal = {The Art, Science, and Engineering of Programming}, volume = {7}, year = {2023}, month = {2/20/2023}, type = {14}, abstract = {Context: Secure Multiparty Computation (MPC) refers to a family of cryptographic techniques where mutually untrusting parties may compute functions of their private inputs while revealing only the function output. Inquiry: It can be hard to program MPCs correctly and efficiently using existing languages and frameworks, especially when they require coordinating disparate computational roles. How can we make this easier? Approach: We present Symphony, a new functional programming language for MPCs among two or more parties. Symphony starts from the single-instruction, multiple-data (SIMD) semantics of prior MPC languages, in which each party carries out symmetric responsibilities, and generalizes it using constructs that can coordinate many parties. Symphony introduces **first-class shares** and **first-class party sets** to provide unmatched language-level expressive power with high efficiency. Knowledge: Developing a core formal language called λ-Symphony, we prove that the intuitive, generalized SIMD view of a program coincides with its actual distributed semantics. Thus the programmer can reason about her programs by reading them from top to bottom, even though in reality the program runs in a coordinated fashion, distributed across many machines. We implemented a prototype interpreter for Symphony leveraging multiple cryptographic backends. With it we wrote a variety of MPC programs, finding that Symphony can express optimized protocols that other languages cannot, and that in general Symphony programs operate efficiently. [ full abstract at https://doi.org/10.22152/programming-journal.org/2023/7/14 ]\
}, doi = {10.22152/programming-journal.org/2023/7/14}, url = {https://arxiv.org/abs/2302.10076}, author = {Ian Sweet and David Darais and David Heath and William Harris and Ryan Estes and Michael Hicks} } @article {3189, title = {Sample-optimal classical shadows for pure states}, year = {2022}, month = {11/21/2022}, abstract = {We consider the classical shadows task for pure states in the setting of both joint and independent measurements. The task is to measure few copies of an unknown pure state $\rho$ in order to learn a classical description which suffices to later estimate expectation values of observables. Specifically, the goal is to approximate $\mathrm{Tr}(O\rho)$ for any Hermitian observable $O$ to within additive error $\epsilon$ provided $\mathrm{Tr}(O^2) \leq B$ and $\|O\| = 1$. Our main result applies to the joint measurement setting, where we show $\tilde{\Theta}(\sqrt{B}\,\epsilon^{-1} + \epsilon^{-2})$ samples of $\rho$ are necessary and sufficient to succeed with high probability. The upper bound is a quadratic improvement on the previous best sample complexity known for this problem. For the lower bound, we see that the bottleneck is not how fast we can learn the state but rather how much any classical description of $\rho$ can be compressed for observable estimation. In the independent measurement setting, we show that $O(\sqrt{Bd}\,\epsilon^{-1} + \epsilon^{-2})$ samples suffice. Notably, this implies that the random Clifford measurements algorithm of Huang, Kueng, and Preskill, which is sample-optimal for mixed states, is not optimal for pure states. Interestingly, our result also uses the same random Clifford measurements but employs a different estimator.
}, keywords = {FOS: Computer and information sciences, FOS: Physical sciences, Information Theory (cs.IT), Machine Learning (cs.LG), Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2211.11810}, url = {https://arxiv.org/abs/2211.11810}, author = {Grier, Daniel and Pashayan, Hakop and Schaeffer, Luke} } @article {3136, title = {Scalably learning quantum many-body Hamiltonians from dynamical data}, year = {2022}, month = {9/28/2022}, abstract = {The physics of a closed quantum mechanical system is governed by its Hamiltonian. However, in most practical situations, this Hamiltonian is not precisely known, and ultimately all there is are data obtained from measurements on the system. In this work, we introduce a highly scalable, data-driven approach to learning families of interacting many-body Hamiltonians from dynamical data, by bringing together techniques from gradient-based optimization from machine learning with efficient quantum state representations in terms of tensor networks. Our approach is highly practical, experimentally friendly, and intrinsically scalable to allow for system sizes of above 100 spins. In particular, we demonstrate on synthetic data that the algorithm works even if one is restricted to one simple initial state, a small number of single-qubit observables, and time evolution up to relatively short times. For the concrete example of the one-dimensional Heisenberg model our algorithm exhibits an error constant in the system size and scaling as the inverse square root of the size of the data set.
}, keywords = {FOS: Computer and information sciences, FOS: Physical sciences, Machine Learning (cs.LG), Quantum Gases (cond-mat.quant-gas), Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2209.14328}, url = {https://arxiv.org/abs/2209.14328}, author = {Wilde, Frederik and Kshetrimayum, Augustine and Roth, Ingo and Hangleiter, Dominik and Sweke, Ryan and Eisert, Jens} } @article {3204, title = {A scheme to create and verify scalable entanglement in optical lattice}, journal = {npj Quantum Information}, volume = {8}, year = {2022}, month = {9/4/2022}, abstract = {To achieve scalable quantum information processing, great efforts have been devoted to the creation of large-scale entangled states in various physical systems. Ultracold atom in optical lattice is considered as one of the promising platforms due to its feasible initialization and parallel manipulation. In this work, we propose an efficient scheme to generate and characterize global entanglement in the optical lattice. With only two-layer quantum circuits, the generation utilizes two-qubit entangling gates based on the superexchange interaction in double wells. The parallelism of these operations enables the generation to be fast and scalable. To verify the entanglement of this non-stabilizer state, we mainly design three complementary detection protocols which are less resource-consuming compared to the full tomography. In particular, one just needs two homogenous local measurement settings to identify the entanglement property. Our entanglement generation and verification protocols provide the foundation for the further quantum information processing in optical lattice.
}, doi = {10.1038/s41534-022-00609-0}, url = {https://arxiv.org/abs/2209.01531}, author = {You Zhou and Bo Xiao and Meng-Da Li and Qi Zhao and Zhen-Sheng Yuan and Xiongfeng Ma and Jian-Wei Pan} } @article {3009, title = {Self-Testing of a Single Quantum System: Theory and Experiment}, year = {2022}, month = {3/17/2022}, abstract = {Certifying individual quantum devices with minimal assumptions is crucial for the development of quantum technologies. Here, we investigate how to leverage single-system contextuality to realize self-testing. We develop a robust self-testing protocol based on the simplest contextuality witness for the simplest contextual quantum system, the Klyachko-Can-Binicio{\u g}lu-Shumovsky (KCBS) inequality for the qutrit. We establish a lower bound on the fidelity of the state and the measurements (to an ideal configuration) as a function of the value of the witness under a pragmatic assumption on the measurements we call the KCBS orthogonality condition. We apply the method in an experiment with randomly chosen measurements on a single trapped 40Ca+ and near-perfect detection efficiency. The observed statistics allow us to self-test the system and provide the first experimental demonstration of quantum self-testing of a single system. Further, we quantify and report that deviations from our assumptions are minimal, an aspect previously overlooked by contextuality experiments.
}, keywords = {Atomic Physics (physics.atom-ph), FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/arXiv.2203.09003}, url = {https://arxiv.org/abs/2203.09003}, author = {Hu, Xiao-Min and Xie, Yi and Arora, Atul Singh and Ai, Ming-Zhong and Bharti, Kishor and Zhang, Jie and Wu, Wei and Chen, Ping-Xing and Cui, Jin-Ming and Liu, Bi-Heng and Huang, Yun-Feng and Li, Chuan-Feng and Guo, Guang-Can and Roland, J{\'e}r{\'e}mie and Cabello, Ad{\'a}n and Kwek, Leong-Chuan} } @article {3017, title = {Shadow Distillation: Quantum Error Mitigation with Classical Shadows for Near-Term Quantum Processors}, year = {2022}, month = {3/14/2022}, abstract = {Mitigating errors in quantum information processing devices is especially important in the absence of fault tolerance. An effective method in suppressing state-preparation errors is using multiple copies to distill the ideal component from a noisy quantum state. Here, we use classical shadows and randomized measurements to circumvent the need for coherent access to multiple copies at an exponential cost. We study the scaling of resources using numerical simulations and find that the overhead is still favorable compared to full state tomography. We optimize measurement resources under realistic experimental constraints and apply our method to an experiment preparing Greenberger-Horne-Zeilinger (GHZ) state with trapped ions. In addition to improving stabilizer measurements, the analysis of the improved results reveals the nature of errors affecting the experiment. Hence, our results provide a directly applicable method for mitigating errors in near-term quantum computers.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2203.07309}, url = {https://arxiv.org/abs/2203.07309}, author = {Seif, Alireza and Cian, Ze-Pei and Zhou, Sisi and Chen, Senrui and Jiang, Liang} } @article {3178, title = {Sharp complexity phase transitions generated by entanglement}, year = {2022}, month = {12/20/2022}, abstract = {Entanglement is one of the physical properties of quantum systems responsible for the computational hardness of simulating quantum systems. But while the runtime of specific algorithms, notably tensor network algorithms, explicitly depends on the amount of entanglement in the system, it is unknown whether this connection runs deeper and entanglement can also cause inherent, algorithm-independent complexity. In this work, we quantitatively connect the entanglement present in certain quantum systems to the computational complexity of simulating those systems. Moreover, we completely characterize the entanglement and complexity as a function of a system parameter. Specifically, we consider the task of simulating single-qubit measurements of k--regular graph states on n qubits. We show that, as the regularity parameter is increased from 1 to n\−1, there is a sharp transition from an easy regime with low entanglement to a hard regime with high entanglement at k=3, and a transition back to easy and low entanglement at k=n\−3. As a key technical result, we prove a duality for the simulation complexity of regular graph states between low and high regularity.
}, keywords = {Computational Complexity (cs.CC), FOS: Computer and information sciences, FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2212.10582}, url = {https://arxiv.org/abs/2212.10582}, author = {Ghosh, Soumik and Deshpande, Abhinav and Hangleiter, Dominik and Gorshkov, Alexey V. and Fefferman, Bill} } @article {3071, title = {Simulation Complexity of Many-Body Localized Systems}, year = {2022}, month = {5/25/2022}, abstract = {We use complexity theory to rigorously investigate the difficulty of classically simulating evolution under many-body localized (MBL) Hamiltonians. Using the defining feature that MBL systems have a complete set of quasilocal integrals of motion (LIOMs), we demonstrate a transition in the classical complexity of simulating such systems as a function of evolution time. On one side, we construct a quasipolynomial-time tensor-network-inspired algorithm for strong simulation of 1D MBL systems (i.e., calculating the expectation value of arbitrary products of local observables) evolved for any time polynomial in the system size. On the other side, we prove that even weak simulation, i.e. sampling, becomes formally hard after an exponentially long evolution time, assuming widely believed conjectures in complexity theory. Finally, using the consequences of our classical simulation results, we also show that the quantum circuit complexity for MBL systems is sublinear in evolution time. This result is a counterpart to a recent proof that the complexity of random quantum circuits grows linearly in time.\
}, url = {https://arxiv.org/abs/2205.12967}, author = {Adam Ehrenberg and Abhinav Deshpande and Christopher L. Baldwin and Dmitry A. Abanin and Alexey V. Gorshkov} } @article {3018, title = {Simultaneous Stoquasticity}, journal = {Phys. Rev. A}, volume = {105}, year = {2022}, month = {06/09/2022}, abstract = {Stoquastic Hamiltonians play a role in the computational complexity of the local Hamiltonian problem as well as the study of classical simulability. In particular, stoquastic Hamiltonians can be straightforwardly simulated using Monte Carlo techniques. We address the question of whether two or more Hamiltonians may be made simultaneously stoquastic via a unitary transformation. This question has important implications for the complexity of simulating quantum annealing where quantum advantage is related to the stoquasticity of the Hamiltonians involved in the anneal. We find that for almost all problems no such unitary exists and show that the problem of determining the existence of such a unitary is equivalent to identifying if there is a solution to a system of polynomial (in)equalities in the matrix elements of the initial and transformed Hamiltonians. Solving such a system of equations is NP-hard. We highlight a geometric understanding of this problem in terms of a collection of generalized Bloch vectors.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.1103/PhysRevA.105.062601}, url = {https://arxiv.org/abs/2202.08863}, author = {Bringewatt, Jacob and Brady, Lucas T.} } @article {3065, title = {A single T-gate makes distribution learning hard}, year = {2022}, month = {7/7/2022}, abstract = {The task of learning a probability distribution from samples is ubiquitous across the natural sciences. The output distributions of local quantum circuits form a particularly interesting class of distributions, of key importance both to quantum advantage proposals and a variety of quantum machine learning algorithms. In this work, we provide an extensive characterization of the learnability of the output distributions of local quantum circuits. Our first result yields insight into the relationship between the efficient learnability and the efficient simulatability of these distributions. Specifically, we prove that the density modelling problem associated with Clifford circuits can be efficiently solved, while for depth d=nΩ(1) circuits the injection of a single T-gate into the circuit renders this problem hard. This result shows that efficient simulatability does not imply efficient learnability. Our second set of results provides insight into the potential and limitations of quantum generative modelling algorithms. We first show that the generative modelling problem associated with depth d=nΩ(1) local quantum circuits is hard for any learning algorithm, classical or quantum. As a consequence, one cannot use a quantum algorithm to gain a practical advantage for this task. We then show that, for a wide variety of the most practically relevant learning algorithms -- including hybrid-quantum classical algorithms -- even the generative modelling problem associated with depth d=ω(log(n)) Clifford circuits is hard. This result places limitations on the applicability of near-term hybrid quantum-classical generative modelling algorithms.
}, url = {https://arxiv.org/abs/2207.03140}, author = {Marcel Hinsche and Marios Ioannou and Alexander Nietner and Jonas Haferkamp and Yihui Quek and Dominik Hangleiter and Jean-Pierre Seifert and Jens Eisert and Ryan Sweke} } @article {2999, title = {Snowmass 2021 White Paper: Tabletop experiments for infrared quantum gravity}, year = {2022}, month = {3/22/2022}, abstract = {Progress in the quantum readout and control of mechanical devices from single atoms to large masses may enable a first generation of experiments probing the gravitational interaction in the quantum regime, conceivably within the next decade. In this Snowmass whitepaper, we briefly outline the possibilities and challenges facing the realization of these experiments. In particular, we emphasize the need for detailed theories of modifications to the usual effective QFT of gravitons in the infrared regime E/L3< The absence of clear signals from particle dark matter in direct detection experiments motivates new approaches in disparate regions of viable parameter space. In this Snowmass white paper, we outline the Windchime project, a program to build a large array of quantum-enhanced mechanical sensors. The ultimate aim is to build a detector capable of searching for Planck mass-scale dark matter purely through its gravitational coupling to ordinary matter. In the shorter term, we aim to search for a number of other physics targets, especially some ultralight dark matter candidates. Here, we discuss the basic design, open R\&D challenges and opportunities, current experimental efforts, and both short- and long-term physics targets of the Windchime project. It is widely expected that systems which fully thermalize are chaotic in the sense of exhibiting random-matrix statistics of their energy level spacings, whereas integrable systems exhibit Poissonian statistics. In this paper, we investigate a third class: spin glasses. 
These systems are partially chaotic but do not achieve full thermalization due to large free energy barriers. We examine the level spacing statistics of a canonical infinite-range quantum spin glass, the quantum p-spherical model, using an analytic path integral approach. We find statistics consistent with a direct sum of independent random matrices, and show that the number of such matrices is equal to the number of distinct metastable configurations -- the exponential of the spin glass ``complexity'' as obtained from the quantum Thouless-Anderson-Palmer equations. We also consider the statistical properties of the complexity itself and identify a set of contributions to the path integral which suggest a Poissonian distribution for the number of metastable configurations. Our results show that level spacing statistics can probe the ergodicity-breaking in quantum spin glasses and provide a way to generalize the notion of spin glass complexity beyond models with a semi-classical limit. The National Institute of Standards and Technology is in the process of selecting public-key cryptographic algorithms through a public, competition-like process. The new public-key cryptography standards will specify additional digital signature, public-key encryption, and key-establishment algorithms to augment Federal Information Processing Standard (FIPS) 186-4, Digital Signature Standard (DSS), as well as NIST Special Publication (SP) 800-56A Revision 3, Recommendation for Pair-Wise Key-Establishment Schemes Using Discrete Logarithm Cryptography, and SP 800-56B Revision 2, Recommendation for Pair-Wise Key Establishment Using Integer Factorization Cryptography. It is intended that these algorithms will be capable of protecting sensitive information well into the foreseeable future, including after the advent of quantum computers. 
This report describes the evaluation and selection process of the NIST Post-Quantum Cryptography Standardization process third-round candidates based on public feedback and internal review. The report summarizes each of the 15 third-round candidate algorithms and identifies those selected for standardization, as well as those that will continue to be evaluated in a fourth round of analysis. The public-key encryption and key-establishment algorithm that will be standardized is CRYSTALS--KYBER. The digital signatures that will be standardized are CRYSTALS--Dilithium, FALCON, and SPHINCS+. While there are multiple signature algorithms selected, NIST recommends CRYSTALS--Dilithium as the primary algorithm to be implemented. In addition, four of the alternate key-establishment candidate algorithms will advance to a fourth round of evaluation: BIKE, Classic McEliece, HQC, and SIKE. These candidates are still being considered for future standardization. NIST will also issue a new Call for Proposals for public-key digital signature algorithms to augment and diversify its signature portfolio. Dissipative systems can often exhibit wavelength-dependent loss rates. One prominent example is Rydberg polaritons formed by electromagnetically-induced transparency, which have long been a leading candidate for studying the physics of interacting photons and also hold promise as a platform for quantum information. In this system, dissipation is in the form of quantum diffusion, i.e., proportional to $k^2$ ($k$ being the wavevector) and vanishing at long wavelengths as $k \to 0$. Here, we show that one-dimensional condensates subject to this type of loss are unstable to long-wavelength density fluctuations in an unusual manner: after a prolonged period in which the condensate appears to relax to a uniform state, local depleted regions quickly form and spread ballistically throughout the system. 
We connect this behavior to the leading-order equation for the nearly-uniform condensate -- a dispersive analogue to the Kardar-Parisi-Zhang (KPZ) equation -- which develops singularities in finite time. Furthermore, we show that the wavefronts of the depleted regions are described by purely dissipative solitons within a pair of hydrodynamic equations, with no counterpart in lossless condensates. We close by discussing conditions under which such singularities and the resulting solitons can be physically realized. Non-Abelian defects that bind Majorana or parafermion zero modes are prominent in several topological quantum computation schemes. Underpinning their established understanding is the quantum Ising spin chain, which can be recast as a fermionic model or viewed as a standalone effective theory for the surface-code edge -- both of which harbor non-Abelian defects. We generalize these notions by deriving an effective Ising-like spin chain describing the edge of quantum-double topological order. Relating Majorana and parafermion modes to anyonic strings, we introduce quantum-double generalizations of non-Abelian defects. We develop a way to embed finite-group valued qunits into those valued in continuous groups. Using this embedding, we provide a continuum description of the spin chain and recast its non-interacting part as a quantum wire via addition of a Wess-Zumino-Novikov-Witten term and non-Abelian bosonization. We present a method for network-capable quantum computing that relies on holographic spin-wave excitations stored collectively in ensembles of qubits. We construct an orthogonal basis of spin waves in a one-dimensional array and show that high-fidelity universal linear controllability can be achieved using only phase shifts, applied in both momentum and position space. 
Neither single-site addressability nor high single-qubit cooperativity is required, and the spin waves can be read out with high efficiency into a single cavity mode for quantum computing and networking applications.\ We address spin transport in the easy-axis Heisenberg spin chain subject to integrability-breaking perturbations. We find that spin transport is subdiffusive with dynamical exponent z=4 up to a timescale that is parametrically long in the anisotropy. In the limit of infinite anisotropy, transport is subdiffusive at all times; for large finite anisotropy, one eventually recovers diffusion at late times, but with a diffusion constant independent of the strength of the integrability breaking perturbation. We provide numerical evidence for these findings, and explain them by adapting the generalized hydrodynamics framework to nearly integrable dynamics. Our results show that the diffusion constant of near-integrable interacting spin chains is not generically a continuous function of the integrability-breaking parameter.\ We provide an efficient algorithm to compile quantum circuits for fault-tolerant execution. We target surface codes, which form a 2D grid of logical qubits with nearest-neighbor logical operations. Embedding an input circuit\&$\#$39;s qubits in surface codes can result in long-range two-qubit operations across the grid. We show how to prepare many long-range Bell pairs on qubits connected by edge-disjoint paths of ancillas in constant depth which can be used to perform these long-range operations. This forms one core part of our Edge-Disjoint Paths Compilation (EDPC) algorithm, by easily performing parallel long-range Clifford operations in constant depth. It also allows us to establish a connection between surface code compilation and several well-studied edge-disjoint paths problems. Similar techniques allow us to perform non-Clifford single-qubit rotations far from magic state distillation factories. 
In this case, we can easily find the maximum set of paths by a max-flow reduction, which forms the other major part of our EDPC algorithm. We compare EDPC to other compilation approaches including a SWAP-based algorithm, and find significantly improved performance for circuits built from parallel CNOTs, and for circuits which implement the multi-controlled X gate. We study synchronous values of games, especially synchronous games. It is known that a synchronous game has a perfect strategy if and only if it has a perfect synchronous strategy. However, we give examples of synchronous games, in particular graph colouring games, with synchronous value that is strictly smaller than their ordinary value. Thus, the optimal strategy for a synchronous game need not be synchronous. We derive a formula for the synchronous value of an XOR game as an optimization problem over a spectrahedron involving a matrix related to the cost matrix. We give an example of a game such that the synchronous value of repeated products of the game is strictly increasing. We show that the synchronous quantum bias of the XOR of two XOR games is not multiplicative. Finally, we derive geometric and algebraic conditions that a set of projections that yields the synchronous value of a game must satisfy. We present an algorithmic framework for quantum-inspired classical algorithms on close-to-low-rank matrices, generalizing the series of results started by Tang\&$\#$39;s breakthrough quantum-inspired algorithm for recommendation systems [STOC\&$\#$39;19]. Motivated by quantum linear algebra algorithms and the quantum singular value transformation (SVT) framework of Gily{\'e}n et al. [STOC\&$\#$39;19], we develop classical algorithms for SVT that run in time independent of input dimension, under suitable quantum-inspired sampling assumptions. Our results give compelling evidence that in the corresponding QRAM data structure input model, quantum SVT does not yield exponential quantum speedups. 
Since the quantum SVT framework generalizes essentially all known techniques for quantum linear algebra, our results, combined with sampling lemmas from previous work, suffice to generalize all recent results about dequantizing quantum machine learning algorithms. In particular, our classical SVT framework recovers and often improves the dequantization results on recommendation systems, principal component analysis, supervised clustering, support vector machines, low-rank regression, and semidefinite program solving. We also give additional dequantization results on low-rank Hamiltonian simulation and discriminant analysis. Our improvements come from identifying the key feature of the quantum-inspired input model that is at the core of all prior quantum-inspired results: l2-norm sampling can approximate matrix products in time independent of their dimension. We reduce all our main results to this fact, making our exposition concise, self-contained, and intuitive. Results are reported from a search for a class of composite dark matter models with feeble, long-range interactions with normal matter. We search for impulses arising from passing dark matter particles by monitoring the mechanical motion of an optically levitated nanogram mass over the course of several days. Assuming such particles constitute the dominant component of dark matter, this search places upper limits on their interaction with neutrons of αn\≤1.2\×10\−7 at 95\% confidence for dark matter masses between 1--10 TeV and mediator masses mφ\≤0.1 eV. Due to the large enhancement of the cross-section for dark matter to coherently scatter from a nanogram mass (\∼1029 times that for a single neutron) and the ability to detect momentum transfers as small as \∼200 MeV/c, these results provide sensitivity to certain classes of composite dark matter models that substantially exceeds existing searches, including those employing kg-scale or ton-scale targets. 
Extensions of these techniques can enable directionally-sensitive searches for a broad class of previously inaccessible heavy dark matter candidates.\ Secure two-party computation considers the problem of two parties computing a joint function of their private inputs without revealing anything beyond the output of the computation. In this work, we take the first steps towards understanding the setting in which the two parties want to evaluate a joint quantum functionality while using only a classical channel between them. Our first result indicates that it is in general impossible to realize a two-party quantum functionality against malicious adversaries with black-box simulation, relying only on classical channels. The negative result stems from reducing the existence of a black-box simulator to an extractor for classical proof of quantum knowledge, which in turn leads to violation of the quantum no-cloning. Next, we introduce the notion of oblivious quantum function evaluation (OQFE). An OQFE is a two-party quantum cryptographic primitive with one fully classical party (Alice) whose input is (a classical description of a) quantum unitary, U, and a quantum party (Bob) whose input is a quantum state, ψ. In particular, Alice receives a classical output corresponding to the measurement of U(ψ) while Bob receives no output. In OQFE, Bob remains oblivious to Alice\&$\#$39;s input, while Alice learns nothing about ψ more than what can be learned from the output. We present two constructions, one secure against semi-honest parties and the other against malicious parties. Due to the no-go result mentioned above, we consider what is arguably the best possible notion obtainable in our model concerning malicious adversaries: one-sided simulation security. Our protocol relies on the assumption of injective homomorphic trapdoor OWFs, which in turn rely on the LWE problem. 
As a result, we put forward a first, simple and modular, construction of one-sided quantum two-party computation and quantum oblivious transfer over classical networks. Secure delegated quantum computing allows a computationally weak client to outsource an arbitrary quantum computation to an untrusted quantum server in a privacy-preserving manner. One of the promising candidates to achieve classical delegation of quantum computation is classical-client remote state preparation (RSPCC), where a client remotely prepares a quantum state using a classical channel. However, the privacy loss incurred by employing RSPCC as a sub-module is unclear. Strongly long-range interacting quantum systems---those with interactions decaying as a power-law 1/rα in the distance r on a D-dimensional lattice for α\≤D---have received significant interest in recent years. They are present in leading experimental platforms for quantum computation and simulation, as well as in theoretical models of quantum information scrambling and fast entanglement creation. Since no notion of locality is expected in such systems, a general understanding of their dynamics is lacking. As a first step towards rectifying this problem, we prove two new Lieb-Robinson-type bounds that constrain the time for signaling and scrambling in strongly long-range interacting systems, for which no tight bounds were previously known. Our first bound applies to systems mappable to free-particle Hamiltonians with long-range hopping, and is saturable for α\≤D/2. Our second bound pertains to generic long-range interacting spin Hamiltonians, and leads to a tight lower bound for the signaling time to extensive subsets of the system for all α\<D. This result also lower-bounds the scrambling time, and suggests a path towards achieving a tight scrambling bound that can prove the long-standing fast scrambling conjecture.\ We propose an efficient quantum algorithm for simulating the dynamics of general Hamiltonian systems. 
Our technique is based on a power series expansion of the time-evolution operator in its off-diagonal terms. The expansion decouples the dynamics due to the diagonal component of the Hamiltonian from the dynamics generated by its off-diagonal part, which we encode using the linear combination of unitaries technique. Our method has an optimal dependence on the desired precision and, as we illustrate, generally requires considerably fewer resources than the current state-of-the-art. We provide an analysis of resource costs for several sample models. Limited quantum memory is one of the most important constraints for near-term quantum devices. Understanding whether a small quantum computer can simulate a larger quantum system, or execute an algorithm requiring more qubits than available, is both of theoretical and practical importance. In this Letter, we introduce cluster parameters K and d of a quantum circuit. The tensor network of such a circuit can be decomposed into clusters of size at most d with at most K qubits of inter-cluster quantum communication. Our main result is a simulation scheme of any (K,d)-clustered quantum circuit on a d-qubit machine in time roughly 2O(K). An important application of our result is the simulation of clustered quantum systems---such as large molecules---that can be partitioned into multiple significantly smaller clusters with weak interactions among them. Another potential application is quantum optimization: we demonstrate numerically that variational quantum eigensolvers can still perform well when restricted to clustered circuits, thus making it feasible to study large quantum systems on small quantum devices. We study a sparse version of the Sachdev-Ye-Kitaev (SYK) model defined on random hypergraphs constructed either by a random pruning procedure or by randomly sampling regular hypergraphs. 
The resulting model has a new parameter, k, defined as the ratio of the number of terms in the Hamiltonian to the number of degrees of freedom, with the sparse limit corresponding to the thermodynamic limit at fixed k. We argue that this sparse SYK model recovers the interesting global physics of ordinary SYK even when k is of order unity. In particular, at low temperature the model exhibits a gravitational sector which is maximally chaotic. Our argument proceeds by constructing a path integral for the sparse model which reproduces the conventional SYK path integral plus gapped fluctuations. The sparsity of the model permits larger scale numerical calculations than previously possible, the results of which are consistent with the path integral analysis. Additionally, we show that the sparsity of the model considerably reduces the cost of quantum simulation algorithms. This makes the sparse SYK model the most efficient currently known route to simulate a holographic model of quantum gravity. We also define and study a sparse supersymmetric SYK model, with similar conclusions to the non-supersymmetric case. Looking forward, we argue that the class of models considered here constitute an interesting and relatively unexplored sparse frontier in quantum many-body physics. Motivated by recent experiments on Mott insulators, in both iridates and ultracold atoms, we theoretically study the effects of magnetic order on the Mott-Hubbard excitons. In particular, we focus on spin-mediated doublon-holon pairing in Hubbard materials. We use several complementary theoretical techniques: mean-field theory to describe the spin degrees of freedom, the self-consistent Born approximation to characterize individual charge excitations across the Hubbard gap, and the Bethe-Salpeter equation to identify bound states of doublons and holons. The binding energy of the Hubbard exciton is found to increase with increasing the N{{\'e}}el order parameter, while the exciton mass decreases. 
We observe that these trends rely significantly on the retardation of the effective interaction, and require consideration of multiple effects from changing the magnetic order. Our results are consistent with the key qualitative trends observed in recent experiments on iridates. Moreover, the findings could have direct implications on ultracold atom Mott insulators, where the Hubbard model is the exact description of the system and the microscopic degrees of freedom can be directly accessed.\ The National Institute of Standards and Technology is in the process of selecting one or more public-key cryptographic algorithms through a public, competition-like process. The new public-key cryptography standards will specify one or more additional digital signatures, public-key encryption, and key-establishment algorithms to augment Federal Information Processing Standard (FIPS) 186-4, Digital Signature Standard (DSS), as well as NIST Special Publication (SP) 800-56A Revision 3, Recommendation for Pair-Wise Key-Establishment Schemes Using Discrete Logarithm Cryptography, and SP 800-56B Revision 2, Recommendation for Pair-Wise Key Establishment Using Integer Factorization Cryptography. It is intended that these algorithms will be capable of protecting sensitive information well into the foreseeable future, including after the advent of quantum computers. The NIST Post-Quantum Cryptography Standardization Process began in 2017 with 69 candidate algorithms that met both the minimum acceptance criteria and submission requirements. The first round lasted until January 2019, during which candidate algorithms were evaluated based on their security, performance, and other characteristics. NIST selected 26 algorithms to advance to the second round for more analysis. This report describes the evaluation and selection process, based on public feedback and internal review, of the second-round candidates. 
The report summarizes the 26 second-round candidate algorithms and identifies those selected to move forward to the third round of the competition. The third-round finalist public-key encryption and key-establishment algorithms are Classic McEliece, CRYSTALS-KYBER, NTRU, and SABER. The third-round finalists for digital signatures are CRYSTALS-DILITHIUM, FALCON, and Rainbow. These finalists will be considered for standardization at the end of the third round. In addition, eight alternate candidate algorithms will also advance to the third round: BIKE, FrodoKEM, HQC, NTRU Prime, SIKE, GeMSS, Picnic, and SPHINCS+. These additional candidates are still being considered for standardization, although this is unlikely to occur at the end of the third round. NIST hopes that the announcement of these finalists and additional candidates will serve to focus the cryptographic community\’s attention during the next round. We study Eigen\&$\#$39;s model of quasi-species, characterized by sequences that replicate with a specified fitness and mutate independently at single sites. The evolution of the population vector in time is then closely related to that of quantum spins in imaginary time. We employ multiple perspectives and tools from interacting quantum systems to examine growth and collapse of realistic viral populations, specifically certain HIV proteins. All approaches used, including the simplest perturbation theory, give consistent results. We investigate sublinear classical and quantum algorithms for matrix games, a fundamental problem in optimization and machine learning, with provable guarantees. Given a matrix A\∈Rn\×d, sublinear algorithms for the matrix game minx\∈Xmaxy\∈Yy⊤Ax were previously known only for two special cases: (1) Y being the l1-norm unit ball, and (2) X being either the l1- or the l2-norm unit ball. 
We give a sublinear classical algorithm that can interpolate smoothly between these two cases: for any fixed q\∈(1,2], we solve the matrix game where X is a lq-norm unit ball within additive error ε in time O~((n+d)/ε2). We also provide a corresponding sublinear quantum algorithm that solves the same task in time O~((n\−\−\√+d\−\−\√)poly(1/ε)) with a quadratic improvement in both n and d. Both our classical and quantum algorithms are optimal in the dimension parameters n and d up to poly-logarithmic factors. Finally, we propose sublinear classical and quantum algorithms for the approximate Carath{\'e}odory problem and the lq-margin support vector machines as applications. Aaronson and Ambainis (2009) and Chailloux (2018) showed that fully symmetric (partial) functions do not admit exponential quantum query speedups. This raises a natural question: how symmetric must a function be before it cannot exhibit a large quantum speedup? Symmetry-breaking transitions are a well-understood phenomenon of closed quantum systems in quantum optics, condensed matter, and high energy physics. However, symmetry breaking in open systems is less thoroughly understood, in part due to the richer steady-state and symmetry structure that such systems possess. For the prototypical open system---a Lindbladian---a unitary symmetry can be imposed in a \"weak\" or a \"strong\" way. We characterize the possible Zn symmetry breaking transitions for both cases. In the case of Z2, a weak-symmetry-broken phase guarantees at most a classical bit steady-state structure, while a strong-symmetry-broken phase admits a partially-protected steady-state qubit. Viewing photonic cat qubits through the lens of strong-symmetry breaking, we show how to dynamically recover the logical information after any gap-preserving strong-symmetric error; such recovery becomes perfect exponentially quickly in the number of photons. 
Our study forges a connection between driven-dissipative phase transitions and error correction. The multi-scale entanglement renormalization ansatz (MERA) postulates the existence of quantum circuits that renormalize entanglement in real space at different length scales. Chern insulators, however, cannot have scale-invariant discrete MERA circuits with finite bond dimension. In this Letter, we show that the continuous MERA (cMERA), a modified version of MERA adapted for field theories, possesses a fixed point wavefunction with nonzero Chern number. Additionally, it is well known that reversed MERA circuits can be used to prepare quantum states efficiently in time that scales logarithmically with the size of the system. However, state preparation via MERA typically requires the advent of a full-fledged universal quantum computer. In this Letter, we demonstrate that our cMERA circuit can potentially be realized in existing analog quantum computers, i.e., an ultracold atomic Fermi gas in an optical lattice with light-induced spin-orbit coupling.\ In a recent breakthrough, Bravyi, Gosset and K{\"o}nig (BGK) [Science, 2018] proved that ``simulating'' constant depth quantum circuits takes classical circuits $\Omega(\log n)$ depth. In our paper, we first formalise their notion of simulation, which we call ``possibilistic simulation''. Then, from well-known results, we deduce that their circuits can be simulated in depth $O(\log^2 n)$. Separately, we construct explicit classical circuits that can simulate any depth-$d$ quantum circuit with Clifford and $t$ T-gates in depth $O(d+t)$. Our classical circuits use {NOT, AND, OR} gates of fan-in $\leq 2$. Answering whether quantum computers can efficiently simulate quantum field theories has both theoretical and practical motivation. From the theoretical point of view, it answers the question of whether a hypothetical computer that utilizes quantum field theory would be more powerful than other quantum computers. 
From the practical point of view, when reliable quantum computers are eventually built, these algorithms can help us better understand the underlying physics that govern our world. In the best known quantum algorithms for simulating quantum field theories, the time scaling is dominated by initial state preparation. In this paper, we exclusively focus on state preparation and present a heuristic algorithm that can prepare the vacuum of fermionic systems in more general cases and more efficiently than previous methods. With our method, state preparation is no longer the bottleneck, as its runtime has the same asymptotic scaling with the desired precision as the remainder of the simulation algorithm. We numerically demonstrate the effectiveness of our proposed method for the 1+1 dimensional Gross-Neveu model. We present a general theory of quantum information propagation in chaotic quantum many-body systems. The generic expectation in such systems is that quantum information does not propagate in localized form; instead, it tends to spread out and scramble into a form that is inaccessible to local measurements. To characterize this spreading, we define an information speed via a quench-type experiment and derive a general formula for it as a function of the entanglement density of the initial state. As the entanglement density varies from zero to one, the information speed varies from the entanglement speed to the butterfly speed. We verify that the formula holds both for a quantum chaotic spin chain and in field theories with an AdS/CFT gravity dual. For the second case, we study in detail the dynamics of entanglement in two-sided Vaidya-AdS-Reissner-Nordstrom black branes. 
We also show that, with an appropriate decoding process, quantum information can be construed as moving at the information speed, and, in the case of AdS/CFT, we show that a locally detectable signal propagates at the information speed in a spatially local variant of the traversable wormhole setup. This paper proposes a privacy protocol for distributed average consensus algorithms on bounded real-valued inputs that guarantees statistical privacy of honest agents\&$\#$39; inputs against colluding (passive adversarial) agents, if the set of colluding agents is not a vertex cut in the underlying communication network. This implies that privacy of agents\&$\#$39; inputs is preserved against t number of arbitrary colluding agents if the connectivity of the communication network is at least (t+1). A similar privacy protocol has been proposed for the case of bounded integral inputs in our previous paper~\cite{gupta2018information}. However, many applications of distributed consensus concerning distributed control or state estimation deal with real-valued inputs. Thus, in this paper we propose an extension of the privacy protocol in~\cite{gupta2018information}, for bounded real-valued agents\&$\#$39; inputs, where bounds are known apriori to all the agents.\ The National Institute of Standards and Technology is in the process of selecting one or more We investigate quantum algorithms for classification, a fundamental problem in machine learning, with provable guarantees. Given n d-dimensional data points, the state-of-the-art (and optimal) classical algorithm for training classifiers with constant margin runs in O~(n+d) time. We design sublinear quantum algorithms for the same task running in O~(n\−\−\√+d\−\−\√) time, a quadratic improvement in both n and d. Moreover, our algorithms use the standard quantization of the classical input and generate the same classical output, suggesting minimal overheads when used as subroutines for end-to-end applications. 
We also demonstrate a tight lower bound (up to poly-log factors) and discuss the possibility of implementation on near-term quantum machines. As a side result, we also give sublinear quantum algorithms for approximating the equilibria of n-dimensional matrix zero-sum games with optimal complexity Θ~(n\−\−\√).\ We study quantum information scrambling, specifically the growth of Heisenberg operators, in large disordered spin chains using matrix product operator dynamics to scan across the thermalization-localization quantum phase transition. We observe ballistic operator growth for weak disorder, and a sharp transition to a phase with sub-ballistic operator spreading. The critical disorder strength for the ballistic to sub-ballistic transition is well below the many body localization phase transition, as determined from finite size scaling of energy eigenstate entanglement entropy in small chains. In contrast, we find that the operator dynamics is not very sensitive to the actual eigenstate localization transition. These data are discussed in the context of a universal form for the growing operator shape and substantiated with a simple phenomenological model of rare regions. We study the phase-space representation of dynamics of bosons in the semiclassical regime where the occupation number of the modes is large. To this end, we employ the van Vleck-Gutzwiller propagator to obtain an approximation for the Green\&$\#$39;s function of the Wigner distribution. The semiclassical analysis incorporates interference of classical paths and reduces to the truncated Wigner approximation (TWA) when the interference is ignored. Furthermore, we identify the Ehrenfest time after which the TWA fails. As a case study, we consider a single-mode quantum nonlinear oscillator, which displays collapse and revival of observables. We analytically show that the interference of classical paths leads to revivals, an effect that is not reproduced by the TWA or a perturbative analysis. 
We illustrate the existence of single-excitation bound states for propagating photons interacting with N two-level atoms. These bound states can be calculated from an effective spin model, and their existence relies on dissipation in the system. The appearance of these bound states is in a one-to-one correspondence with zeros in the single-photon transmission and with divergent bunching in the second-order photon-photon correlation function. We also formulate a dissipative version of Levinson's theorem for this system by looking at the relation between the number of bound states and the winding number of the transmission phases. This theorem allows a direct experimental measurement of the number of bound states using the measured transmission phases. We show that Ramsey spectroscopy of fermionic alkaline-earth atoms in a square-well trap provides an efficient and accurate estimate for the eigenspectrum of a density matrix whose n copies are stored in the nuclear spins of n such atoms. This spectrum estimation is enabled by the high symmetry of the interaction Hamiltonian, dictated, in turn, by the decoupling of the nuclear spin from the electrons and by the shape of the square-well trap. Practical performance of this procedure and its potential applications to quantum computing, quantum simulation, and time-keeping with alkaline-earth atoms are discussed. The SU(1,1) interferometer was originally conceived as a Mach-Zehnder interferometer with the beam-splitters replaced by parametric amplifiers. The parametric amplifiers produce states with correlations that result in enhanced phase sensitivity. F=1 spinor Bose-Einstein condensates (BECs) can serve as the parametric amplifiers for an atomic version of such an interferometer by collisionally producing entangled pairs of $|F=1, m=\pm 1\rangle$ atoms. We simulate the effect of single and double-sided seeding of the inputs to the amplifier using the truncated-Wigner approximation. 
We find that single-sided seeding degrades the performance of the interferometer exactly at the phase the unseeded interferometer should operate the best. Double-sided seeding results in a phase-sensitive amplifier, where the maximal sensitivity is a function of the phase relationship between the input states of the amplifier. In both single and double-sided seeding we find there exists an optimal phase shift that achieves sensitivity beyond the standard quantum limit. Experimentally, we demonstrate a spinor phase-sensitive amplifier using a BEC of $^{23}$Na in an optical dipole trap. This configuration could be used as an input to such an interferometer. We are able to control the initial phase of the double-seeded amplifier, and demonstrate sensitivity to initial population fractions as small as 0.1\%.\ We apply the periodized stationary phase method to discrete Wigner functions of systems with odd prime dimension using results from p-adic number theory. We derive the Wigner-Weyl-Moyal (WWM) formalism with higher order $\hbar$ corrections representing contextual corrections to non-contextual Clifford operations. We apply this formalism to a subset of unitaries that include diagonal gates such as the $\pi/8$ gates. We characterize the stationary phase critical points as a quantum resource injecting contextuality and show that this resource allows for the replacement of the $p^{2t}$ points that represent t magic state Wigner functions on p-dimensional qudits by $\leq p^t$ points. We find that the $\pi/8$ gate introduces the smallest higher order $\hbar$ correction possible, requiring the lowest number of additional critical points compared to the Clifford gates. We then establish a relationship between the stabilizer rank of states and the number of critical points necessary to treat them in the WWM formalism. 
This allows us to exploit the stabilizer rank decomposition of two qutrit $\pi/8$ gates to develop a classical strong simulation of a single qutrit marginal on t qutrit $\pi/8$ gates that are followed by Clifford evolution, and show that this only requires calculating $3^{t/2+1}$ critical points corresponding to Gauss sums. This outperforms the best alternative qutrit algorithm (based on Wigner negativity and scaling as $\sim 3^{0.8t}$ for $10^{-2}$ precision) for any number of $\pi/8$ gates to full precision. We consider the general form of ``Correlated Worldline'' (CWL) theories of quantum gravity. We show that one can have 2 different kinds of CWL theory, in which the generating functional is written as either a sum or a product over multiple copies of the coupled matter and gravitational fields. In both versions, the paths in a functional formulation are correlated via gravity itself, causing a breakdown of the superposition principle; however, the product form survives consistency tests not satisfied by the summed form. To better understand the structure of these two theories, we show how to perform diagrammatic expansions in the gravitational coupling for each version of CWL theory, using particle propagation and scalar fields as examples. We explicitly calculate contributions to 2-point and 4-point functions, again for each version of the theory, up to 2nd-order in the gravitational coupling. The noble elements, argon and xenon, are frequently employed as the target and event detector for weakly interacting particles such as neutrinos and Dark Matter. For such rare processes, background radiation must be carefully minimized. Radon provides one of the most significant contaminants since it is an inevitable product of trace amounts of natural uranium. 
To design a purification system for reducing such contamination, the adsorption characteristics of radon in nitrogen, argon, and xenon carrier gases on various types of charcoals with different adsorbing properties and intrinsic radioactive purities have been studied in the temperature range of 190--295 K at flow rates of 0.5 and 2 standard liters per minute. Essential performance parameters for the various charcoals include the average breakthrough times ( Research shows that community plays a central role in learning, and strong community engages students and aids in student persistence. Thus, understanding the function and structure of communities in learning environments is essential to education. We use social network analysis to explore the community dynamics of students in a pre-matriculation, two-week summer program. Unlike previous network analysis studies in PER, we build our networks from classroom video that has been coded for student interactions using labeled, directed ties. We define 3 types of interaction: on task interactions (regarding the assigned task), on topic interactions (having to do with science, technology, engineering, and mathematics (STEM)), and off topic interactions (unrelated to the assignment or STEM). To study the development of community in this program, we analyze the shift in conversation topicality over the course of the program. Conversations are more on-task toward the end of the program and we propose that this conversational shift represents a change in student membership within their forming community.\ We study circuit complexity for spatial regions in holographic field theories. We study analogues based on the entanglement wedge of the bulk quantities appearing in the ``complexity = volume'' and ``complexity = action'' conjectures. 
We calculate these quantities for one exterior region of an eternal static neutral or charged black hole in general dimensions, dual to a thermal state on one boundary with or without chemical potential respectively, as well as for a shock wave geometry. We then define several analogues of circuit complexity for mixed states, and use tensor networks to gain intuition about them. We find a promising qualitative match between the holographic action and what we call the purification complexity, the minimum number of gates required to prepare an arbitrary purification of the given mixed state. On the other hand, the holographic volume does not appear to match any of our definitions of mixed-state complexity. We describe two procedures which, given access to one copy of a quantum state and a sequence of two-outcome measurements, can distinguish between the case that at least one of the measurements accepts the state with high probability, and the case that all of the measurements have low probability of acceptance. The measurements cannot simply be tried in sequence, because early measurements may disturb the state being tested. One procedure is based on a variant of Marriott-Watrous amplification. The other procedure is based on the use of a test for this disturbance, which is applied with low probability. We find a number of applications. First, quantum query complexity separations in the property testing model for testing isomorphism of functions under group actions. We give quantum algorithms for testing isomorphism, linear isomorphism and affine isomorphism of boolean functions which use exponentially fewer queries than is possible classically, and a quantum algorithm for testing graph isomorphism which uses polynomially fewer queries than the best algorithm known. Second, testing properties of quantum states and operations. 
We show that any finite property of quantum states can be tested using a number of copies of the state which is logarithmic in the size of the property, and give a test for genuine multipartite entanglement of states of n qubits that uses O(n) copies of the state. Third, correcting an error in a result of Aaronson on de-Merlinizing quantum protocols. This result claimed that, in any one-way quantum communication protocol where two parties are assisted by an all-powerful but untrusted third party, the third party can be removed with only a modest increase in the communication cost. We give a corrected proof of a key technical lemma required for Aaronson's result. In this paper we improve the layered implementation of arbitrary stabilizer circuits introduced by Aaronson and Gottesman in Phys. Rev. A 70(052328), 2004: to implement a general stabilizer circuit, we reduce their 11-stage computation -H-C-P-C-P-C-H-P-C-P-C- over the gate set consisting of Hadamard, Controlled-NOT, and Phase gates, into a 7-stage computation of the form -C-CZ-P-H-P-CZ-C-. We show arguments in support of using -CZ- stages over the -C- stages: not only the use of -CZ- stages allows a shorter layered expression, but -CZ- stages are simpler and appear to be easier to implement compared to the -C- stages. Based on this decomposition, we develop a two-qubit gate depth-$(14n-4)$ implementation of stabilizer circuits over the gate library {H, P, CNOT}, executable in the LNN architecture, improving best previously known depth-25n circuit, also executable in the LNN architecture. Our constructions rely on Bruhat decomposition of the symplectic group and on folding arbitrarily long sequences of the form (-P-C-)$^m$ into a 3-stage computation -P-CZ-C-. Our results include the reduction of the 11-stage decomposition -H-C-P-C-P-C-H-P-C-P-C- into a 9-stage decomposition of the form -C-P-C-P-H-C-P-C-P-. This reduction is based on the Bruhat decomposition of the symplectic group. 
This result also implies a new normal form for stabilizer circuits. We show that a circuit in this normal form is optimal in the number of Hadamard gates used. We also show that the normal form has an asymptotically optimal number of parameters. As single-photon sources become more mature and are used more often in quantum information, communications, and measurement applications, their characterization becomes more important. Single-photon-like light is often characterized by its brightness, as well as two quantum properties: the suppression of multiphoton content and the photon indistinguishability. While it is desirable to obtain these quantities from a single measurement, currently two or more measurements are required. Here, we show that using two-photon ($n=2$) number-resolving detectors, one can completely characterize single-photon-like states in a single measurement, where previously two or more measurements were necessary. We simultaneously determine the brightness, the suppression of multiphoton states, the indistinguishability, and the statistical distribution of Fock states to third order for a quantum light source. We find $n \geq 3$ number-resolving detectors provide no additional advantage in the single-photon characterization. The new method extracts more information per experimental trial than a conventional measurement for all input states and is particularly more efficient for statistical mixtures of photon states. Thus, using this $n=2$, number-resolving detector scheme will provide advantages in a variety of quantum optics measurements and systems. Exactly solvable models have played an important role in establishing the sophisticated modern understanding of equilibrium many-body physics. And conversely, the relative scarcity of solutions for non-equilibrium models greatly limits our understanding of systems away from thermal equilibrium. 
We study a family of nonequilibrium models, some of which can be viewed as dissipative analogues of the transverse-field Ising model, in that an effectively classical Hamiltonian is frustrated by dissipative processes that drive the system toward states that do not commute with the Hamiltonian. Surprisingly, a broad and experimentally relevant subset of these models can be solved efficiently in any number of spatial dimensions. We leverage these solutions to prove a no-go theorem on steady-state phase transitions in a many-body model that can be realized naturally with Rydberg atoms or trapped ions, and to compute the effects of decoherence on a canonical trapped-ion-based quantum computation architecture. In this paper we introduce and formalize Substochastic Monte Carlo (SSMC) algorithms. These algorithms, originally intended to be a better classical foil to quantum annealing than simulated annealing, prove to be worthy optimization algorithms in their own right. In SSMC, a population of walkers is initialized according to a known distribution on an arbitrary search space and varied into the solution of some optimization problem of interest. The first argument of this paper shows how an existing classical algorithm, ``Go-With-The-Winners'' (GWW), is a limiting case of SSMC when restricted to binary search and particular driving dynamics.\ Recent work on quantum machine learning has demonstrated that quantum computers can offer dramatic improvements over classical devices for data mining, prediction and classification. However, less is known about the advantages using quantum computers may bring in the more general setting of reinforcement learning, where learning is achieved via interaction with a task environment that provides occasional rewards. Reinforcement learning can incorporate data-analysis-oriented learning settings as special cases, but also includes more complex situations where, e.g., reinforcing feedback is delayed. 
In a few recent works, Grover-type amplification has been utilized to construct quantum agents that achieve up-to-quadratic improvements in learning efficiency. These encouraging results have left open the key question of whether super-polynomial improvements in learning times are possible for genuine reinforcement learning problems, that is problems that go beyond the other more restricted learning paradigms. In this work, we provide a family of such genuine reinforcement learning tasks. We construct quantum-enhanced learners which learn super-polynomially, and even exponentially faster than any classical reinforcement learning model, and we discuss the potential impact our results may have on future technologies. Tightly confined modes of light, as in optical nanofibers or photonic crystal waveguides, can lead to large optical coupling in atomic systems, which mediates long-range interactions between atoms. These one-dimensional systems can naturally possess couplings that are asymmetric between modes propagating in different directions. Strong long-range interaction among atoms via these modes can drive them to a self-organized periodic distribution. In this paper, we examine the self-organizing behavior of atoms in one dimension coupled to a chiral reservoir. We determine the solution to the equations of motion in different parameter regimes, relative to both the detuning of the pump laser that initializes the atomic dipole-dipole interactions and the degree of reservoir chirality. In addition, we calculate possible experimental signatures such as reflectivity from self-organized atoms and motional sidebands. Advances in single photon creation, transmission, and detection suggest that sending quantum information over optical fibers may have losses low enough to be correctable using a quantum error correcting code. 
Such error-corrected communication is equivalent to a novel quantum repeater scheme, but crucial questions regarding implementation and system requirements remain open. Here we show that long range entangled bit generation with rates approaching $10^8$ ebits/s may be possible using a completely serialized protocol, in which photons are generated, entangled, and error corrected via sequential, one-way interactions with a minimal number of matter qubits. Provided loss and error rates of the required elements are below the threshold for quantum error correction, this scheme demonstrates improved performance over transmission of single photons. We find improvement in ebit rates at large distances using this serial protocol and various quantum error correcting codes. A strongly driven quantum system, coupled to a thermalizing bath, generically evolves into a highly non-thermal state as the external drive competes with the equilibrating force of the bath. We demonstrate a notable exception to this picture for a microwave resonator interacting with a periodically driven double quantum dot (DQD). In the limit of strong driving and long times, we show that the resonator field can be driven into a thermal state with a chemical potential given by a harmonic of the drive frequency. Such tunable chemical potentials are achievable with current devices and would have broad utility for quantum simulation in circuit quantum electrodynamics. As an example, we show how several DQDs embedded in an array of microwave resonators can induce a phase transition to a Bose-Einstein condensate of light. This paper develops general space-efficient methods for error reduction for unitary quantum computation. Consider a polynomial-time quantum computation with completeness\ A steady-state superradiant laser can be used to generate ultranarrow-linewidth light, and thus has important applications in the fields of quantum information and precision metrology. 
However, the light produced by such a laser is still essentially classical. Here, we show that the introduction of a Rydberg medium into a cavity containing atoms with a narrow optical transition can lead to the steady-state superradiant emission of ultranarrow-linewidth\ We propose a method for creating far-field optical barrier potentials for ultracold atoms with widths that are narrower than the diffraction limit and can approach tens of nanometers. The reduced widths stem from the nonlinear atomic response to control fields that create spatially varying dark resonances. The subwavelength barrier is the result of the geometric scalar potential experienced by an atom prepared in such a spatially varying dark state. The performance of this technique, as well as its applications to the study of many-body physics and to the implementation of quantum-information protocols with ultracold atoms, are discussed, with a focus on the implementation of tunnel junctions. We determine the exact time evolution of an initial Bardeen-Cooper-Schrieffer (BCS) state of ultra-cold atoms in a hexagonal optical lattice. The dynamical evolution is triggered by ramping the lattice potential up, such that the interaction strength Uf is much larger than the hopping amplitude Jf. The quench initiates collective oscillations with frequency |Uf|/(2π) in the momentum occupation numbers and imprints an oscillating phase with the same frequency on the order parameter Δ. The latter is not reproduced by treating the time evolution in mean-field theory. The momentum density-density or noise correlation functions oscillate at frequency |Uf|/2π as well as its second harmonic. For a very deep lattice, with negligible tunneling energy, the oscillations of momentum occupation numbers are undamped. Non-zero tunneling after the quench leads to dephasing of the different momentum modes and a subsequent damping of the oscillations. 
This occurs even for a finite-temperature initial BCS state, but not for a non-interacting Fermi gas. We therefore propose to use this dephasing to detect a BCS state. Finally, we predict that the noise correlation functions in a honeycomb lattice will develop strong anti-correlations near the Dirac point. It is well-known that deciding equivalence of logic circuits is a coNP-complete problem. As a corollary, the problem of deciding weak equivalence of reversible circuits, i.e. allowing initialized ancilla bits in the input and ignoring ``garbage'' ancilla bits in the output, is also coNP-complete. The complexity of deciding strong equivalence, including the ancilla bits, is less obvious and may depend on gate set. Here we use Barrington's theorem to show that deciding strong equivalence of reversible circuits built from the Fredkin gate is coNP-complete. This implies coNP-completeness of deciding strong equivalence for other commonly used universal reversible gate sets, including any gate set that includes the Toffoli or Fredkin gate. The problem of understanding the Fourier-analytic structure of the cone of We present a negative result regarding the hidden subgroup problem on the powers $G^n$ of a fixed group G. Under a condition on the base group G, we prove that strong Fourier sampling cannot distinguish some subgroups of $G^n$. Since strong sampling is in fact the optimal measurement on a coset state, this shows that we have no hope of efficiently solving the hidden subgroup problem over these groups with separable measurements on coset states (that is, using any polynomial number of single-register coset state experiments). Base groups satisfying our condition include all nonabelian simple groups. We apply our results to show that there exist uniform families of nilpotent groups whose normal series factors have constant size and yet are immune to strong Fourier sampling.
In this work, we investigate this question using the Constructive Cryptography framework by Maurer and Renner (ICS'11). We first identify the goal of RSPCC as the construction of ideal RSP resources from classical channels and then reveal the security limitations of using RSPCC. First, we uncover a fundamental relationship between constructing ideal RSP resources (from classical channels) and the task of cloning quantum states. Any classically constructed ideal RSP resource must leak to the server the full classical description (possibly in an encoded form) of the generated quantum state, even if we target computational security only. As a consequence, we find that the realization of common RSP resources, without weakening their guarantees drastically, is impossible due to the no-cloning theorem. Second, the above result does not rule out that a specific RSPCC protocol can replace the quantum channel at least in some contexts, such as the Universal Blind Quantum Computing (UBQC) protocol of Broadbent et al. (FOCS '09). However, we show that the resulting UBQC protocol cannot maintain its proven composable security as soon as RSPCC is used as a subroutine. Third, we show that replacing the quantum channel of the above UBQC protocol by the RSPCC protocol QFactory of Cojocaru et al. (Asiacrypt '19), preserves the weaker, game-based, security of UBQC.
In this work, we prove that hypergraph symmetries in the adjacency matrix model allow at most a polynomial separation between randomized and quantum query complexities. We also show that, remarkably, permutation groups constructed out of these symmetries are essentially the only permutation groups that prevent super-polynomial quantum speedups. We prove this by fully characterizing the primitive permutation groups that allow super-polynomial quantum speedups.
In contrast, in the adjacency list model for bounded-degree graphs (where graph symmetry is manifested differently), we exhibit a property testing problem that shows an exponential quantum speedup. These results resolve open questions posed by Ambainis, Childs, and Liu (2010) and Montanaro and de Wolf (2013).
public-key cryptographic algorithms through a public competition-like process. The new public-key cryptography standards will specify one or more additional digital signature, public-key
encryption, and key-establishment algorithms to augment FIPS 186-4, Digital Signature Standard
(DSS), as well as special publications SP 800-56A Revision 2, Recommendation for Pair-Wise
Key Establishment Schemes Using Discrete Logarithm Cryptography, and SP 800-56B,
Recommendation for Pair-Wise Key-Establishment Schemes Using Integer Factorization. It is
intended that these algorithms will be capable of protecting sensitive information well into the
foreseeable future, including after the advent of quantum computers.
In November 2017, 82 candidate algorithms were submitted to NIST for consideration. Among
these, 69 met both the minimum acceptance criteria and our submission requirements, and were
accepted as First-Round Candidates on Dec. 20, 2017, marking the beginning of the First Round
of the NIST Post-Quantum Cryptography Standardization Process. This report describes the
evaluation criteria and selection process, based on public feedback and internal review of the
first-round candidates, and summarizes the 26 candidate algorithms announced on January 30,
2019 for moving forward to the second round of the competition. The 17 Second-Round
Candidate public-key encryption and key-establishment algorithms are BIKE, Classic McEliece,
CRYSTALS-KYBER, FrodoKEM, HQC, LAC, LEDAcrypt (merger of LEDAkem/LEDApkc),
NewHope, NTRU (merger of NTRUEncrypt/NTRU-HRSS-KEM), NTRU Prime, NTS-KEM,
ROLLO (merger of LAKE/LOCKER/Ouroboros-R), Round5 (merger of Hila5/Round2), RQC,
SABER, SIKE, and Three Bears. The 9 Second-Round Candidates for digital signatures are
CRYSTALS-DILITHIUM, FALCON, GeMSS, LUOV, MQDSS, Picnic, qTESLA, Rainbow,
and SPHINCS+.
Although limiting to GWW, SSMC is more general. We show that (1) GWW can be efficiently simulated within the SSMC framework, (2) SSMC can be exponentially faster than GWW, (3) by naturally incorporating structural information, SSMC can exponentially outperform the quantum algorithm that first inspired it, and (4) SSMC exhibits desirable search features in general spaces. Our approach combines ideas from genetic algorithms (GWW), theoretical probability (Fleming-Viot processes), and quantum computing. Not only do we demonstrate that SSMC is often more efficient than competing algorithms, but we also hope that our results connecting these disciplines will impact each independently. An implemented version of SSMC has previously enjoyed some success as a competitive optimization algorithm for Max-
positive functions on a group has a long history. In this article, we develop the first
quantitative spectral concentration results for such functions over arbitrary compact
groups. Specifically, we describe a family of finite, positive quadrature rules for the
Fourier coefficients of band-limited functions on compact groups. We apply these
quadrature rules to establish a spectral concentration result for positive functions:
given appropriately nested band limits A \⊂ B \⊂ G, we prove a lower bound on the
fraction of L2-mass that any B-band-limited positive function has in A. Our bounds
are explicit and depend only on elementary properties of A and B; they are the first
such bounds that apply to arbitrary compact groups. They apply to finite groups as
a special case, where the quadrature rule is given by the Fourier transform on the
smallest quotient whose dual contains the Fourier support of the function.