@phdthesis{THESIS,
      recid = {12686},
      author = {Lykov, Danylo},
      title = {Large-Scale Classical Simulations for Development of Quantum Optimization Algorithms},
      school = {University of Chicago},
      type = {Ph.D. thesis},
      year = {2024},
      month = {8},
      abstract = {As the quantum computing field begins to reach the realm of advantage over classical algorithms, simulating quantum circuits becomes increasingly challenging as we design and evaluate more complex quantum algorithms. In this context, tensor networks, which have become a standard method for simulations in various areas of physics, from many-body quantum physics to quantum gravity, offer a natural approach. Despite the availability of efficient tools for physics simulations, simulating quantum circuits presents unique challenges, which I address in this work, using the Quantum Approximate Optimization Algorithm (QAOA) as an example. For large-scale contraction of tensor networks, I propose a step-dependent parallelization approach that slices partially contracted tensor networks. I also study tensor network contractions on GPUs and propose a contraction algorithm that uses both the CPU and the GPU to reduce GPU overhead. In our benchmarks, this algorithm reduces the time to solution from 6 seconds to 1.5-2 seconds. These improvements allow us to predict the performance of QAOA on the MaxCut problem without running the quantum optimization. The predicted QAOA performance is then used to systematically compare QAOA against classical MaxCut solvers. It turns out that one needs to simulate a deep quantum circuit in order to compete with classical solvers. In search of provable speedups, I then turn to a different combinatorial optimization problem, Low-Autocorrelation Binary Sequences (LABS). This study involves simulating the ground-state overlap for deep circuits, a task better suited to state-vector simulation, which requires advanced distributed algorithms when done at scale.},
      url = {http://knowledge.uchicago.edu/record/12686},
      doi = {10.6082/uchicago.12686},
}