@InProceedings{RoyChaudhuri+Kalyanakrishnan:2020,
  author    = {{Roy Chaudhuri}, Arghya and Kalyanakrishnan, Shivaram},
  title     = {Regret Minimisation in Multi-Armed Bandits Using Bounded Arm Memory},
  booktitle = {Proceedings of the Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI 2020)},
  note      = {To appear},
  abstract  = {Regret minimisation in stochastic multi-armed bandits is a well-studied problem, for which several optimal algorithms have been proposed. Such algorithms depend on (sufficient statistics of) the empirical reward distributions of the arms to decide which arm to pull next. In this paper, we consider the design of algorithms that are constrained to store statistics from only a bounded number of arms. For bandits with a finite set of arms, we derive a sub-linear upper bound on the regret that decreases with the ``arm memory'' size M. For instances with a large, possibly infinite, set of arms, we show a sub-linear bound on the quantile regret. Our problem formulation generalises that of Liau et al. (2018), who fix M = O(1), and so do not obtain bounds that depend on M. More importantly, our algorithms keep exploration and exploitation tightly coupled, without a dedicated exploration phase as employed by Liau et al. (2018). Although this choice makes our analysis harder, it leads to much-improved practical performance. For bandits with a large number of arms and no known structure on the rewards, our algorithms serve as a viable option. Unlike many other approaches to restrict the memory of bandit algorithms, our algorithms do not need any additional technical assumptions.}
}