<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.2 20190208//EN" "JATS-archivearticle1.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="1.2"><front><journal-meta><journal-id journal-id-type="nlm-ta">elife</journal-id><journal-id journal-id-type="publisher-id">eLife</journal-id><journal-title-group><journal-title>eLife</journal-title></journal-title-group><issn pub-type="epub" publication-format="electronic">2050-084X</issn><publisher><publisher-name>eLife Sciences Publications, Ltd</publisher-name></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">67490</article-id><article-id pub-id-type="doi">10.7554/eLife.67490</article-id><article-categories><subj-group subj-group-type="display-channel"><subject>Research Article</subject></subj-group><subj-group subj-group-type="heading"><subject>Neuroscience</subject></subj-group></article-categories><title-group><article-title>Non-linear dimensionality reduction on extracellular waveforms reveals cell type diversity in premotor cortex</article-title></title-group><contrib-group><contrib contrib-type="author" id="author-227704"><name><surname>Lee</surname><given-names>Eric Kenji</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0002-7166-0909</contrib-id><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="con1"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-227755"><name><surname>Balasubramanian</surname><given-names>Hymavathy</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0001-5371-2966</contrib-id><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="con2"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-227756"><name><surname>Tsolias</surname><given-names>Alexandra</given-names></name><contrib-id authenticated="true" 
contrib-id-type="orcid">https://orcid.org/0000-0002-2956-3267</contrib-id><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="con3"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-227754"><name><surname>Anakwe</surname><given-names>Stephanie Udochukwu</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0001-9236-6090</contrib-id><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="con4"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-227705"><name><surname>Medalla</surname><given-names>Maria</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0003-4890-2532</contrib-id><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="other" rid="fund4"/><xref ref-type="other" rid="fund5"/><xref ref-type="fn" rid="con5"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-131750"><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0003-1534-9240</contrib-id><xref ref-type="aff" rid="aff5">5</xref><xref ref-type="aff" rid="aff6">6</xref><xref ref-type="aff" rid="aff7">7</xref><xref ref-type="aff" rid="aff8">8</xref><xref ref-type="aff" rid="aff9">9</xref><xref ref-type="aff" rid="aff10">10</xref><xref ref-type="other" rid="fund3"/><xref ref-type="other" rid="fund8"/><xref ref-type="other" rid="fund9"/><xref ref-type="other" rid="fund10"/><xref ref-type="other" rid="fund11"/><xref ref-type="other" rid="fund12"/><xref ref-type="other" rid="fund13"/><xref ref-type="other" rid="fund14"/><xref ref-type="other" rid="fund15"/><xref ref-type="other" rid="fund16"/><xref ref-type="other" rid="fund18"/><xref ref-type="other" rid="fund19"/><xref ref-type="other" rid="fund20"/><xref ref-type="other" rid="fund21"/><xref ref-type="fn" rid="con6"/><xref ref-type="fn" 
rid="conf1"/></contrib><contrib contrib-type="author" corresp="yes" id="author-61578"><name><surname>Chandrasekaran</surname><given-names>Chandramouli</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0002-1711-590X</contrib-id><email>cchandr1@bu.edu</email><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff11">11</xref><xref ref-type="aff" rid="aff12">12</xref><xref ref-type="other" rid="fund1"/><xref ref-type="other" rid="fund2"/><xref ref-type="other" rid="fund6"/><xref ref-type="other" rid="fund7"/><xref ref-type="other" rid="fund17"/><xref ref-type="fn" rid="con7"/><xref ref-type="fn" rid="conf1"/></contrib><aff id="aff1"><label>1</label><institution>Psychological and Brain Sciences, Boston University</institution><addr-line><named-content content-type="city">Boston</named-content></addr-line><country>United States</country></aff><aff id="aff2"><label>2</label><institution>Bernstein Center for Computational Neuroscience</institution><addr-line><named-content content-type="city">Berlin</named-content></addr-line><country>Germany</country></aff><aff id="aff3"><label>3</label><institution>Department of Anatomy and Neurobiology, Boston University</institution><addr-line><named-content content-type="city">Boston</named-content></addr-line><country>United States</country></aff><aff id="aff4"><label>4</label><institution>Undergraduate Program in Neuroscience, Boston University</institution><addr-line><named-content content-type="city">Boston</named-content></addr-line><country>United States</country></aff><aff id="aff5"><label>5</label><institution>Department of Electrical Engineering, Stanford University</institution><addr-line><named-content content-type="city">Stanford</named-content></addr-line><country>United States</country></aff><aff id="aff6"><label>6</label><institution>Department of Bioengineering, 
Stanford University</institution><addr-line><named-content content-type="city">Stanford</named-content></addr-line><country>United States</country></aff><aff id="aff7"><label>7</label><institution>Department of Neurobiology, Stanford University</institution><addr-line><named-content content-type="city">Stanford</named-content></addr-line><country>United States</country></aff><aff id="aff8"><label>8</label><institution>Wu Tsai Neurosciences Institute, Stanford University</institution><addr-line><named-content content-type="city">Stanford</named-content></addr-line><country>United States</country></aff><aff id="aff9"><label>9</label><institution>Bio-X Institute, Stanford University</institution><addr-line><named-content content-type="city">Stanford</named-content></addr-line><country>United States</country></aff><aff id="aff10"><label>10</label><institution>Howard Hughes Medical Institute, Stanford University</institution><addr-line><named-content content-type="city">Stanford</named-content></addr-line><country>United States</country></aff><aff id="aff11"><label>11</label><institution>Center for Systems Neuroscience, Boston University</institution><addr-line><named-content content-type="city">Boston</named-content></addr-line><country>United States</country></aff><aff id="aff12"><label>12</label><institution>Department of Biomedical Engineering, Boston University</institution><addr-line><named-content content-type="city">Boston</named-content></addr-line><country>United States</country></aff></contrib-group><contrib-group content-type="section"><contrib contrib-type="editor"><name><surname>Salinas</surname><given-names>Emilio</given-names></name><role>Reviewing Editor</role><aff><institution>Wake Forest School of Medicine</institution><country>United States</country></aff></contrib><contrib contrib-type="senior_editor"><name><surname>Frank</surname><given-names>Michael J</given-names></name><role>Senior Editor</role><aff><institution>Brown 
University</institution><country>United States</country></aff></contrib></contrib-group><pub-date date-type="publication" publication-format="electronic"><day>06</day><month>08</month><year>2021</year></pub-date><pub-date pub-type="collection"><year>2021</year></pub-date><volume>10</volume><elocation-id>e67490</elocation-id><history><date date-type="received" iso-8601-date="2021-02-12"><day>12</day><month>02</month><year>2021</year></date><date date-type="accepted" iso-8601-date="2021-08-04"><day>04</day><month>08</month><year>2021</year></date></history><pub-history><event><event-desc>This manuscript was published as a preprint at bioRxiv.</event-desc><date date-type="preprint" iso-8601-date="2021-02-08"><day>08</day><month>02</month><year>2021</year></date><self-uri content-type="preprint" xlink:href="https://doi.org/10.1101/2021.02.07.430135"/></event></pub-history><permissions><copyright-statement>&#169; 2021, Lee et al</copyright-statement><copyright-year>2021</copyright-year><copyright-holder>Lee et al</copyright-holder><ali:free_to_read/><license xlink:href="http://creativecommons.org/licenses/by/4.0/"><ali:license_ref>http://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This article is distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License</ext-link>, which permits unrestricted use and redistribution provided that the original author and source are credited.</license-p></license></permissions><self-uri content-type="pdf" xlink:href="elife-67490-v4.pdf"/><abstract><p>Cortical circuits are thought to contain a large number of cell types that coordinate to produce behavior. Current in vivo methods rely on clustering of specified features of extracellular waveforms to identify putative cell types, but these capture only a small amount of variation. 
Here, we develop a new method (<italic>WaveMAP</italic>) that combines non-linear dimensionality reduction with graph clustering to identify putative cell types. We apply <italic>WaveMAP</italic> to extracellular waveforms recorded from dorsal premotor cortex of macaque monkeys performing a decision-making task. Using <italic>WaveMAP</italic>, we robustly establish eight waveform clusters and show that these clusters recapitulate previously identified narrow- and broad-spiking types while revealing previously unknown diversity within these subtypes. The eight clusters exhibited distinct laminar distributions, characteristic firing rate patterns, and decision-related dynamics. Such insights were weaker when using feature-based approaches. <italic>WaveMAP</italic> therefore provides a more nuanced understanding of the dynamics of cell types in cortical circuits.</p></abstract><kwd-group kwd-group-type="author-keywords"><kwd>nonlinear dimensionality reduction</kwd><kwd>waveforms</kwd><kwd>cell types</kwd><kwd>circuits</kwd><kwd>layers</kwd></kwd-group><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd>Rhesus macaque</kwd></kwd-group><funding-group><award-group id="fund1"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000065</institution-id><institution>National Institute of Neurological Disorders and Stroke</institution></institution-wrap></funding-source><award-id>R00NS092972</award-id><principal-award-recipient><name><surname>Chandrasekaran</surname><given-names>Chandramouli</given-names></name></principal-award-recipient></award-group><award-group id="fund2"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000065</institution-id><institution>National Institute of Neurological Disorders and 
Stroke</institution></institution-wrap></funding-source><award-id>K99NS092972</award-id><principal-award-recipient><name><surname>Chandrasekaran</surname><given-names>Chandramouli</given-names></name></principal-award-recipient></award-group><award-group id="fund3"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000011</institution-id><institution>Howard Hughes Medical Institute</institution></institution-wrap></funding-source><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund4"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000025</institution-id><institution>National Institute of Mental Health</institution></institution-wrap></funding-source><award-id>R00MH101234</award-id><principal-award-recipient><name><surname>Medalla</surname><given-names>Maria</given-names></name></principal-award-recipient></award-group><award-group id="fund5"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000025</institution-id><institution>National Institute of Mental Health</institution></institution-wrap></funding-source><award-id>R01MH116008</award-id><principal-award-recipient><name><surname>Medalla</surname><given-names>Maria</given-names></name></principal-award-recipient></award-group><award-group id="fund6"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100001391</institution-id><institution>Whitehall Foundation</institution></institution-wrap></funding-source><award-id>2019-12-77</award-id><principal-award-recipient><name><surname>Chandrasekaran</surname><given-names>Chandramouli</given-names></name></principal-award-recipient></award-group><award-group id="fund7"><funding-source><institution-wrap><institution-id 
institution-id-type="FundRef">http://dx.doi.org/10.13039/100000874</institution-id><institution>Brain and Behavior Research Foundation</institution></institution-wrap></funding-source><award-id>27923</award-id><principal-award-recipient><name><surname>Chandrasekaran</surname><given-names>Chandramouli</given-names></name></principal-award-recipient></award-group><award-group id="fund8"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000052</institution-id><institution>NIH Office of the Director</institution></institution-wrap></funding-source><award-id>DP1HD075623</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund9"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000055</institution-id><institution>National Institute on Deafness and Other Communication Disorders</institution></institution-wrap></funding-source><award-id>DC014034</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund10"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000055</institution-id><institution>National Institute on Deafness and Other Communication Disorders</institution></institution-wrap></funding-source><award-id>DC017844</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund11"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000065</institution-id><institution>National Institute of Neurological Disorders and 
Stroke</institution></institution-wrap></funding-source><award-id>NS095548</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund12"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000065</institution-id><institution>National Institute of Neurological Disorders and Stroke</institution></institution-wrap></funding-source><award-id>NS098968</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund13"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000185</institution-id><institution>Defense Advanced Research Projects Agency</institution></institution-wrap></funding-source><award-id>N66001-10-C-2010</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund14"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000185</institution-id><institution>Defense Advanced Research Projects Agency</institution></institution-wrap></funding-source><award-id>W911NF-14-2-0013</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund15"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000893</institution-id><institution>Simons Foundation</institution></institution-wrap></funding-source><award-id>325380</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group 
id="fund16"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000893</institution-id><institution>Simons Foundation</institution></institution-wrap></funding-source><award-id>543045</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund17"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000065</institution-id><institution>National Institute of Neurological Disorders and Stroke</institution></institution-wrap></funding-source><award-id>NS122969</award-id><principal-award-recipient><name><surname>Chandrasekaran</surname><given-names>Chandramouli</given-names></name></principal-award-recipient></award-group><award-group id="fund18"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100000006</institution-id><institution>Office of Naval Research</institution></institution-wrap></funding-source><award-id>N000141812158</award-id><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund19"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100005492</institution-id><institution>Stanford University</institution></institution-wrap></funding-source><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group id="fund20"><funding-source><institution-wrap><institution>Wu Tsai Neurosciences Institute, Stanford University</institution></institution-wrap></funding-source><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><award-group 
id="fund21"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100010865</institution-id><institution>Stanford Engineering</institution></institution-wrap></funding-source><principal-award-recipient><name><surname>Shenoy</surname><given-names>Krishna V</given-names></name></principal-award-recipient></award-group><funding-statement>The funders had no role in study design, data collection and interpretation, or the decision to submit the work for publication.</funding-statement></funding-group><custom-meta-group><custom-meta specific-use="meta-only"><meta-name>Author impact statement</meta-name><meta-value>WaveMAP is a novel approach that combines nonlinear dimensionality reduction with graph clustering on extracellular waveforms to reveal previously obscured cell type diversity in monkey cortex.</meta-value></custom-meta></custom-meta-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The processes involved in decision-making, such as deliberation on sensory evidence and the preparation and execution of motor actions, are thought to emerge from the coordinated dynamics within and between cortical layers (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>; <xref ref-type="bibr" rid="bib54">Finn et al., 2019</xref>), cell types (<xref ref-type="bibr" rid="bib130">Pinto and Dan, 2015</xref>; <xref ref-type="bibr" rid="bib51">Estebanez et al., 2017</xref>; <xref ref-type="bibr" rid="bib95">Lui et al., 2021</xref>; <xref ref-type="bibr" rid="bib88">Kvitsiani et al., 2013</xref>), and brain areas (<xref ref-type="bibr" rid="bib58">Gold and Shadlen, 2007</xref>; <xref ref-type="bibr" rid="bib29">Cisek, 2012</xref>). 
A large body of research has described differences in decision-related dynamics across brain areas (<xref ref-type="bibr" rid="bib46">Ding and Gold, 2012</xref>; <xref ref-type="bibr" rid="bib166">Thura and Cisek, 2014</xref>; <xref ref-type="bibr" rid="bib140">Roitman and Shadlen, 2002</xref>; <xref ref-type="bibr" rid="bib63">Hanks et al., 2015</xref>) and a smaller set of studies has provided insight into layer-dependent dynamics during decision-making (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>; <xref ref-type="bibr" rid="bib54">Finn et al., 2019</xref>; <xref ref-type="bibr" rid="bib26">Chandrasekaran et al., 2019</xref>; <xref ref-type="bibr" rid="bib14">Bastos et al., 2018</xref>). However, we currently do not understand how decision-related dynamics emerge across putative cell types. Here, we address this open question by developing a new method, <italic>WaveMAP</italic>, that combines non-linear dimensionality reduction and graph-based clustering. We apply <italic>WaveMAP</italic> to extracellular waveforms to identify putative cell classes and examine their physiological, functional, and laminar distribution properties.</p><p>In mice, and to some extent in rats, transgenic tools allow the in vivo detection of particular cell types (<xref ref-type="bibr" rid="bib130">Pinto and Dan, 2015</xref>; <xref ref-type="bibr" rid="bib95">Lui et al., 2021</xref>), whereas in vivo studies in primates are largely restricted to using features of the extracellular action potential (EAP) such as trough to peak duration, spike width, and cell firing rate (FR). Early in vivo monkey work (<xref ref-type="bibr" rid="bib119">Mountcastle et al., 1969</xref>) introduced the importance of EAP features, such as spike duration and action potential (AP) width, in identifying cell types. These experiments introduced the concept of broad- and narrow-spiking neurons. 
Later experiments in the guinea pig (<xref ref-type="bibr" rid="bib105">McCormick et al., 1985</xref>), cat (<xref ref-type="bibr" rid="bib7">Azouz et al., 1997</xref>), and the rat (<xref ref-type="bibr" rid="bib152">Simons, 1978</xref>; <xref ref-type="bibr" rid="bib13">Barth&#243; et al., 2004</xref>) then helped establish the idea that these broad- and narrow-spiking extracellular waveform shapes mostly corresponded to excitatory and inhibitory cells, respectively. These results have been used as the basis for identifying cell types in primate recordings (<xref ref-type="bibr" rid="bib72">Johnston et al., 2009</xref>; <xref ref-type="bibr" rid="bib111">Merchant et al., 2008</xref>; <xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>). This method of identifying cell types in mammalian cortex in vivo is widely used in neuroscience but it is insufficient to capture the known structural and transcriptomic diversity of cell types in the monkey and the mouse (<xref ref-type="bibr" rid="bib66">Hodge et al., 2019</xref>; <xref ref-type="bibr" rid="bib86">Krienen et al., 2020</xref>). Furthermore, recent observations in the monkey defy this simple classification of broad- and narrow-spiking cells as corresponding to excitatory and inhibitory cells, respectively. 
Three such examples in the primate that have resisted this principle are narrow-spiking pyramidal tract neurons in deep layers of M1 (Betz&#160;cells, <xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>; <xref ref-type="bibr" rid="bib154">Soares et al., 2017</xref>), narrow and broad spike widths among excitatory pyramidal tract neurons of premotor cortex (<xref ref-type="bibr" rid="bib93">Lemon et al., 2021</xref>), and narrow-spiking excitatory cells in layer III of V1, V2, and MT (<xref ref-type="bibr" rid="bib33">Constantinople et al., 2009</xref>; <xref ref-type="bibr" rid="bib3">Amatrudo et al., 2012</xref>; <xref ref-type="bibr" rid="bib127">Onorato et al., 2020</xref>; <xref ref-type="bibr" rid="bib79">Kelly et al., 2019</xref>).</p><p>To capture a more representative diversity of cell types in vivo, more recent studies have incorporated additional features of EAPs (beyond AP width) such as trough to peak duration (<xref ref-type="bibr" rid="bib4">Ardid et al., 2015</xref>), repolarization time (<xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>; <xref ref-type="bibr" rid="bib10">Banaie Boroujeni et al., 2021</xref>), and triphasic waveform shape (<xref ref-type="bibr" rid="bib12">Barry, 2015</xref>; <xref ref-type="bibr" rid="bib139">Robbins et al., 2013</xref>). Although these user-specified methods are amenable to human intuition, they are insufficient to distinguish between previously identified cell types (<xref ref-type="bibr" rid="bib87">Krimer et al., 2005</xref>; <xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>; <xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>). It is also unclear how to choose these user-specified features in a principled manner (i.e. one set that maximizes explanatory power) as they are often highly correlated with one another. 
This results in different studies choosing between different sets of specified features each yielding different inferred cell classes (<xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>; <xref ref-type="bibr" rid="bib175">Viskontas et al., 2007</xref>; <xref ref-type="bibr" rid="bib75">Katai et al., 2010</xref>; <xref ref-type="bibr" rid="bib161">Sun et al., 2021</xref>). Thus, it is difficult to compare putative cell types across literature. Some studies even conclude that there is no single set of specified features that is a reliable differentiator of type (<xref ref-type="bibr" rid="bib178">Weir et al., 2014</xref>).</p><p>These issues led us to investigate techniques that do&#160;not require feature specification but are designed to find patterns in complex datasets through non-linear dimensionality reduction. Such methods have seen usage in diverse neuroscientific contexts such as single-cell transcriptomics (<xref ref-type="bibr" rid="bib162">Tasic et al., 2018</xref>; <xref ref-type="bibr" rid="bib16">Becht et al., 2019</xref>), in analyzing models of biological neural networks (<xref ref-type="bibr" rid="bib101">Maheswaranathan et al., 2019</xref>; <xref ref-type="bibr" rid="bib82">Kleinman et al., 2019</xref>), the identification of behavior (<xref ref-type="bibr" rid="bib9">Bala et al., 2020</xref>; <xref ref-type="bibr" rid="bib67">Hsu and Yttri, 2020</xref>; <xref ref-type="bibr" rid="bib47">Dolensek et al., 2020</xref>), and in electrophysiology (<xref ref-type="bibr" rid="bib71">Jia et al., 2019</xref>; <xref ref-type="bibr" rid="bib60">Gouwens et al., 2020</xref>; <xref ref-type="bibr" rid="bib83">Klemp&#237;&#345; et al., 2020</xref>; <xref ref-type="bibr" rid="bib103">Markanday et al., 2020</xref>; <xref ref-type="bibr" rid="bib43">Dimitriadis et al., 2018</xref>).</p><p>Here, in a novel technique that we term <italic>WaveMAP</italic>, we combine a non-linear dimensionality reduction method (Uniform Manifold Approximation and 
Projection [UMAP], <xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>) with graph community detection (Louvain community detection, <xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>; which we colloquially call &#8216;clustering&#8217;) to understand the physiological properties, decision-related dynamics, and laminar distribution of candidate cell types during decision-making. We applied <italic>WaveMAP</italic> to extracellular waveforms collected from neurons in macaque dorsal premotor cortex (PMd) in a decision-making task using laminar multi-channel probes (16 electrode &#8216;U-probes&#8217;). We found that <italic>WaveMAP</italic> significantly outperformed current approaches without need for user-specification of waveform features like trough to peak duration. This data-driven approach exposed more diversity in extracellular waveform shape than any constructed spike features in isolation or in combination. Using interpretable machine learning, we also show that <italic>WaveMAP</italic> picks up on nuanced and meaningful biological variability in waveform shape.</p><p><italic>WaveMAP</italic> revealed three broad-spiking and five narrow-spiking waveform types that differed significantly in shape, physiological, functional, and laminar distribution properties. Although most narrow-spiking cells had the high maximum firing rates typically associated with inhibitory neurons, some had firing rates similar to broad-spiking neurons which are typically considered to be excitatory. The time at which choice selectivity (&#8216;discrimination time&#8217;) emerged for many narrow-spiking cell classes was earlier than broad-spiking neuron classes&#8212;except for the narrow-spiking cells that had broad-spiking like maximum firing rates. Finally, many clusters had distinct laminar distributions that appear layer-dependent in a manner matching certain anatomical cell types. 
This clustering explains variability in discrimination time over and above previously reported laminar differences (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). Together, this constellation of results reveals previously undocumented relationships between waveform shape, physiological, functional, and laminar distribution properties that are missed by traditional approaches. Our results provide powerful new insights into how candidate cell classes can be better identified and how these types coordinate with specific timing, across layers, to shape decision-related dynamics.</p></sec><sec id="s2" sec-type="results"><title>Results</title><sec id="s2-1"><title>Task and behavior</title><p>Two male rhesus macaques (T and O) were trained to perform a red-green reaction time decision-making task (<xref ref-type="fig" rid="fig1">Figure 1A</xref>). The task was to discriminate the dominant color of a central static red-green checkerboard cue and to report their decision with an arm movement towards one of two targets (red or green) on the left or right (<xref ref-type="fig" rid="fig1">Figure 1A</xref>).</p><fig id="fig1" position="float"><label>Figure 1.</label><caption><title>Recording locations, waveform shapes, techniques, task, and discrimination behavior.</title><p>(<bold>A</bold>) An illustration of the behavioral setup in the discrimination task.&#160;The monkey was seated with one arm free and one arm gently restrained in a plastic tube via a cloth sling. An infrared-reflecting (IR) bead was taped to the forefinger of the free hand and was used in tracking arm movements. This gave us a readout of the hand&#8217;s position and allowed us to mimic a touch screen. (<bold>B</bold>) A timeline of the decision-making task (top). At bottom is defined the parametrization of difficulty in the task in terms of color coherence and signed color coherence (SC). 
(<bold>C</bold>) Average discrimination performance and (<bold>D</bold>) Reaction time (RT) over sessions of the two monkeys as a function of the SC of the checkerboard cue. RT plotted here includes both correct and incorrect trials for each session and then averaged across sessions. Gray markers show measured data points along with 2 &#215; S.E.M. estimated over sessions. For many data points in (<bold>C</bold>), the error bars lie within the marker. X-axes in both (<bold>C</bold>), (<bold>D</bold>) depict the SC in %. Y-axes depict the percent responded red in (<bold>C</bold>) and RT in (<bold>D</bold>). Also shown in the inset of (<bold>C</bold>) are discrimination thresholds (mean &#177; S.D. over sessions) estimated from a Weibull fit to the overall percent correct as a function of coherence. The discrimination threshold is the color coherence at which the monkey made 81.6% correct choices. Seventy-five sessions for monkey T (128,989 trials) and 66 sessions for monkey O (108,344 trials) went into the averages. (<bold>E</bold>) The recording location in caudal PMd (top); normalized and aligned isolated single-unit waveforms (n = 625, 1.6 ms each, bottom); and schematic of the 16-channel Plexon U-probe (right) used during the behavioral experiment.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig1.jpg"/></fig><p>The timeline of the task is as follows: a trial began when the monkey touched the center target and fixated on a cross above it. After a short randomized period, two targets, red and green, appeared on either side of the center target (see <xref ref-type="fig" rid="fig1">Figure 1B</xref>, top). The target configuration was randomized: sometimes the left target was red and the right target was green or vice versa. 
After another short randomized target viewing period, a red-green checkerboard appeared in the center of the screen with a variable mixture of red and green squares.</p><p>We parameterized the variability of the checkerboard by its signed color coherence and color coherence. The signed color coherence (SC) provides an estimate of whether there are more red or green squares in the checkerboard. Positive SC indicates the presence of more red squares, whereas negative SC indicates more green squares. SC close to zero (positive or negative) indicates an almost even number of red or green squares (<xref ref-type="fig" rid="fig1">Figure 1B</xref>, bottom). The coherence (C) provides an estimate of the difficulty of a stimulus. Higher coherence indicates that there is more of one color than the other (an easy trial) whereas a lower coherence indicates that the two colors are more equal in number (a difficult trial).</p><p>Our monkeys demonstrated the range of behaviors typically observed in decision-making tasks: monkeys made more errors and were slower for lower coherence checkerboards compared to higher coherence checkerboards (<xref ref-type="fig" rid="fig1">Figure 1C,D</xref>). We used coherence, choice, and reaction times (RT) to analyze the structure of decision-related neural activity.</p></sec><sec id="s2-2"><title>Recordings and single neuron identification</title><p>While monkeys performed this task, we recorded single neurons from the caudal aspect of dorsal premotor cortex (PMd; <xref ref-type="fig" rid="fig1">Figure 1E</xref>, top) using single tungsten (FHC electrodes) or linear multi-contact electrodes (Plexon U-Probes, 625 neurons, 490 U-probe waveforms; <xref ref-type="fig" rid="fig1">Figure 1E</xref>, right) and a Cerebus Acquisition System (Blackrock Microsystems). In this study, we analyzed the average EAP waveforms of these neurons. All waveforms were analyzed after being filtered by a fourth-order high-pass Butterworth filter (250 Hz). 
A 1.6 ms snippet of the waveform was recorded for each spike and used in these analyses, a duration longer than many studies of waveform shape (<xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>).</p><p>We restricted our analysis to well-isolated single neurons identified through a combination of careful online isolation combined with offline spike sorting (see Methods section: <italic>Identification of single neurons during recordings</italic>). Extracellular waveforms were isolated as single neurons by only accepting waveforms with minimal ISI violations (1.5% &lt; 1.5 ms). This combination of online vigilance, combined with offline analysis, provides us the confidence to label these waveforms as single neurons.</p><p>We used previously reported approaches to align, average, and normalize spikes (<xref ref-type="bibr" rid="bib78">Kaufman et al., 2013</xref>; <xref ref-type="bibr" rid="bib153">Snyder et al., 2016</xref>). Spikes were aligned in time via their depolarization trough and normalized between &#8722;1 and 1. &#8216;Positive spiking&#8217; units with large positive amplitude pre-hyperpolarization spikes were dropped from the analysis due to their association with dendrites and axons (<xref ref-type="bibr" rid="bib57">Gold et al., 2009</xref>; <xref ref-type="bibr" rid="bib12">Barry, 2015</xref>; <xref ref-type="bibr" rid="bib161">Sun et al., 2021</xref>). 
Recordings were pooled across monkeys to increase statistical power for <italic>WaveMAP</italic>.</p></sec><sec id="s2-3"><title>Non-linear dimensionality reduction with graph clustering reveals robust low-dimensional structure in extracellular waveform shape</title><p>In <italic>WaveMAP</italic> (<xref ref-type="fig" rid="fig2">Figure 2</xref>), we use a three-step strategy for the analysis of extracellular waveforms: We first passed the normalized and trough-aligned waveforms (<xref ref-type="fig" rid="fig2">Figure 2A&#8211;i</xref>) into UMAP to obtain a high-dimensional graph (<xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii</xref>; <xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>). Second, we used this graph (<xref ref-type="fig" rid="fig2">Figure 2B&#8211;iii</xref>) and passed it into Louvain clustering (<xref ref-type="fig" rid="fig2">Figure 2B-iv</xref>, <xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>), to delineate high-dimensional clusters. Third, we used UMAP to project the high-dimensional graph into two dimensions (<xref ref-type="fig" rid="fig2">Figure 2B&#8211;v</xref>). We colored the data points in this projected space according to their Louvain cluster membership found in step two to arrive at our final <italic>WaveMAP</italic> clusters (<xref ref-type="fig" rid="fig2">Figure 2B&#8211;vi</xref>). We also analyzed the <italic>WaveMAP</italic> clusters using interpretable machine learning (<xref ref-type="fig" rid="fig2">Figure 2B&#8211;vii</xref>) and also an inverse transform of UMAP (<xref ref-type="fig" rid="fig2">Figure 2B&#8211;viii</xref>). 
A detailed explanation of the steps associated with <italic>WaveMAP</italic> is available in the methods, and further mathematical details of <italic>WaveMAP</italic> are available in the Supplementary Information.</p><fig-group><fig id="fig2" position="float"><label>Figure 2.</label><caption><title>Schematic of <italic>WaveMAP.</italic></title><p>(<bold>A</bold>) <italic>WaveMAP</italic> begins with UMAP which projects high-dimensional data into lower dimension while preserving local and global relationships (see <xref ref-type="fig" rid="fig2s1">Figure 2&#8212;figure supplement 1A</xref> for an intuitive diagram).&#160;Normalized average waveforms from single units (<bold>i</bold>) are passed to UMAP (<xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>) which begins with the construction of a high-dimensional graph (ii). In the high-dimensional space (ii.a), UMAP constructs a distance metric local to each data point (ii.b). The unit ball (ball with radius of one) of each local metric stretches to the 1st-nearest neighbor. Beyond this unit ball, local distances decrease (ii.c) according to an exponential distribution that is scaled by the local density. This local metric is used to construct a weighted graph with asymmetric edges (ii.d). The 1-nearest neighbors are connected by an edge of weight 1.0. For the next <inline-formula><mml:math id="inf1"><mml:mrow><mml:mi>k</mml:mi><mml:mo mathvariant="normal">-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:math></inline-formula>-nearest neighbors, this weight then falls off according to the exponential local distance metric (in this diagram <inline-formula><mml:math id="inf2"><mml:mrow><mml:mtext mathvariant="normal">k</mml:mtext><mml:mo mathvariant="normal">=</mml:mo><mml:mn mathvariant="normal">4</mml:mn></mml:mrow></mml:math></inline-formula> with some low weight connections omitted for clarity). 
These edges, <inline-formula><mml:math id="inf3"><mml:mi>a</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf4"><mml:mi>b</mml:mi></mml:math></inline-formula>, are made symmetric according to <inline-formula><mml:math id="inf5"><mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo mathvariant="normal">+</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo mathvariant="normal">-</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo mathvariant="normal">&#8901;</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula> (ii.e). (<bold>B</bold>) The high-dimensional graph (iii) captures latent structure in the high-dimensional space. We can use this graph in Louvain community detection (Louvain, iv) (<xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>) to find clusters (see <xref ref-type="fig" rid="fig2s1">Figure 2&#8212;figure supplement 1B</xref> for an intuitive diagram). In Louvain, each data point is first initialized as belonging to its own &#8216;community&#8217; (iv.a, analogous to a cluster in a metric space). Then, in an iterative procedure, each data point joins neighboring communities until a measure called &#8216;modularity&#8217; is maximized (iv.b, see Supplemental Information for a definition of modularity). Next, data points in the same final community are aggregated to a single node and the process repeats until the maximal modularity is found on this newly aggregated graph. This process then keeps repeating until the maximal modularity graph is found and the final community memberships are passed back to the original data points. We can also use this graph to find a low-dimensional representation through a graph layout procedure (<bold>v</bold>). The graph layout proceeds by finding a &#8216;low energy&#8217; configuration that balances attractive (shown as springs in v.a) and repulsive (not shown) forces between pairs of points as a function of edge weight or lack thereof. 
This procedure iteratively minimizes the cross-entropy between the low-dimensional and high-dimensional graphs (v.b). The communities found through Louvain are then combined with the graph layout procedure to arrive at a set of clusters in a low-dimensional embedded space (vi). These clusters (vi, top) can be used to classify the original waveforms (vi, bottom). To investigate &#8216;why&#8217; these data points became clusters, each cluster is examined for locally (within-cluster) important features (SHAP <xref ref-type="bibr" rid="bib98">Lundberg and Lee, 2017</xref>), (vii) and globally important trends (UMAP inverse transform, viii). Not shown is the classifier SHAP values are calculated from. The diagrams for the graph construction and layout are based on UMAP documentation and the diagram for Louvain community detection is based on <xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>. <xref ref-type="fig" rid="fig2s1">Figure 2&#8212;figure supplement 1</xref>: An intuitive diagram of local and global distance preservation in UMAP and a schematic of the Louvain clustering process.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig2.jpg"/></fig><fig id="fig2s1" position="float" specific-use="child-fig"><label>Figure 2&#8212;figure supplement 1.</label><caption><title>Diagrams of UMAP and Louvain community detection.</title><p>(<bold>A</bold>) A demonstration of UMAP projection on a 3D point cloud skeleton of a wooly mammoth.&#160;Local and global structures are incorporated and projected into lower dimension. This preservation of information is evident in the maintained structure of individual bone shapes and sensible spatial relationships between the body parts. Idea from M. 
Noichl (<ext-link ext-link-type="uri" xlink:href="https://github.com/MNoichl/UMAP-examples-mammoth-">https://github.com/MNoichl/UMAP-examples-mammoth-</ext-link>;&#160;<xref ref-type="bibr" rid="bib123">Noichl, 2019</xref>) and mammoth skeleton from the Smithsonian Institute&#8217;s Smithsonian 3D (<ext-link ext-link-type="uri" xlink:href="https://3d.si.edu/">https://3d.si.edu/</ext-link>). (<bold>B</bold>) The Louvain community detection algorithm is applied to weighted symmetric graphs and proceeds in three steps which are said to be one &#8216;pass&#8217; of the algorithm: (1) each node is assigned to its own cluster; (2) each node is randomly moved into a neighboring cluster and if modularity increases, it becomes a member of that cluster; (3) once modularity no longer increases, each cluster is collapsed into one node. This process repeats for multiple passes until modularity no longer increases. The final cluster memberships are then passed back to the data points on the original graph.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig2-figsupp1.jpg"/></fig></fig-group><p><xref ref-type="fig" rid="fig3">Figure 3A</xref> shows how <italic>WaveMAP</italic> provides a clear organization without the need for prior specification of important features.&#160;For expository reasons, and to link to prior literature (<xref ref-type="bibr" rid="bib105">McCormick et al., 1985</xref>; <xref ref-type="bibr" rid="bib31">Connors et al., 1982</xref>), we use the trough to peak duration to loosely subdivide these eight clusters into &#8216;narrow-spiking&#8217; and &#8216;broad-spiking&#8217; cluster sets. The broad-spiking clusters had a trough to peak duration of 0.74 &#177; 0.24 ms (mean &#177; S.D.) and the narrow-spiking clusters had a trough to peak duration of 0.36 &#177; 0.07 ms (mean &#177; S.D.). 
The narrow-spiking neurons are shown in warm colors (including green) at right in <xref ref-type="fig" rid="fig3">Figure 3A</xref> and the broad-spiking neurons are shown in cool colors at left in the same figure. The narrow-spiking set was composed of five clusters with &#8216;narrow-spiking&#8217; waveforms (clusters &#9312;, &#9313;, &#9314;, &#9315;, &#9316;) and comprised &#8764;12%, &#8764;12%, &#8764;18%, &#8764;7%, and &#8764;19% (n = 72, 78, 113, 43, and 116) respectively of the total waveforms, for &#8764;68% of total waveforms. The broad-spiking set was composed of three &#8216;broad-spiking&#8217; waveform clusters (&#9317;, &#9318;, and &#9319;) comprising &#8764;13%, &#8764;5%, and &#8764;15% (n = 80, 29, and 94) respectively and collectively &#8764;32% of total waveforms.</p><fig-group><fig id="fig3" position="float"><label>Figure 3.</label><caption><title>UMAP and Louvain clustering reveal a robust diversity of averaged single-unit waveform shapes.</title><p>(<bold>A</bold>) Scatter plot of normalized EAP waveforms in UMAP space colored by Louvain cluster membership.&#160;Adjacent to each numbered cluster (&#9312; through &#9319;) is shown all member waveforms and the average waveform shape (in black). Each waveform is 1.6 ms. Percentages do not add to 100% due to rounding. (<bold>B</bold>) Louvain clustering resolution parameter versus modularity score (in blue, axis at left) and the number of clusters (communities) found (in gray, axis at right). This was averaged over 25 runs for <italic>WaveMAP</italic> using 25 random samples and seeds of 80% of the full dataset at each resolution parameter from 0 to 8 in 0.5 unit increments (a subset of the data was used to obtain error bars). Each data point is the mean &#177; S.D. with many S.D. bars smaller than the marker size. Green chevrons indicate the resolution parameter of 1.5 chosen and its position along both curves. 
(<bold>C</bold>) The confusion matrix of a gradient boosted decision tree classifier with 5-fold cross-validation and hyperparameter optimization. The main diagonal shows the held-out classification accuracy for each cluster and the off-diagonals show the misclassification rates for each cluster to each other cluster. The average accuracy across all clusters was 91%. <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1</xref>: A stability analysis of <italic>WaveMAP</italic> clustering showing solutions are stable with respect to random seed, random data subset, and in an ensembled version of Louvain. <xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2</xref>: Different amplitude normalizations have similar effect but this processing is essential to <italic>WaveMAP</italic> extracting meaningful structure. <xref ref-type="fig" rid="fig3s3">Figure 3&#8212;figure supplement 3</xref>: Pre-processing waveform data with principal component analysis does not alter <italic>WaveMAP</italic> results.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig3.jpg"/></fig><fig id="fig3s1" position="float" specific-use="child-fig"><label>Figure 3&#8212;figure supplement 1.</label><caption><title>Stability analysis of <italic>WaveMAP</italic>: (<bold>A</bold>) <italic>WaveMAP</italic> instantiated with three different UMAP random seeds (each row is a different seed) and Louvain resolution parameters.</title><p>(<bold>B</bold>) The mean &#177; S.D. number of clusters (Louvain communities; in red) produced by <italic>WaveMAP</italic> and adjusted mutual information score (in green) across 100 random samples at various proportions of the full dataset. Number of clusters and adjusted mutual information (AMI) are omitted for 100% of the data because the number of clusters is equal to our result and thus AMI is 1.0 by definition of it being itself. 
(<bold>C</bold>) Ensemble clustering on graphs (ECG) is also applied to the UMAP graph. This also produced eight clusters.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig3-figsupp1.jpg"/></fig><fig id="fig3s2" position="float" specific-use="child-fig"><label>Figure 3&#8212;figure supplement 2.</label><caption><title>Different amplitude normalizations have similar effect but are essential for meaningful <italic>WaveMAP</italic> structure.</title><p>(<bold>A</bold>) As in <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, the number of Louvain communities found across various random subsets and random seeds.&#160;The mean number of clusters shown on the full dataset with a dashed line. (<bold>B</bold>) The <italic>WaveMAP</italic> clusters on waveforms with &#177;one trough to peak normalization (used in the paper). (<bold>C</bold>) The same random subsetting and random seed strategy in (<bold>A</bold>) applied to waveform data normalized to trough depth. (<bold>D</bold>) <italic>WaveMAP</italic> clusters applied to waveform data normalized to trough depth. (<bold>E</bold>) Un-normalized waveforms were passed through <italic>WaveMAP</italic> with the same parameters used previously. 
(<bold>F</bold>) Each waveform in the projected UMAP space found in (<bold>E</bold>) is colored according to the amplitude (log of the difference between maximum and minimum values).</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig3-figsupp2.jpg"/></fig><fig id="fig3s3" position="float" specific-use="child-fig"><label>Figure 3&#8212;figure supplement 3.</label><caption><title>Pre-processing with PCA does not alter <italic>WaveMAP</italic> structure: the full dataset was pre-processed with principal component analysis (PCA) and projected into the space of the first three principal components.</title><p>The scree plot, with explained variance above each bar, shows that the dataset is low-dimensional with 94% of the variance explained in three components. The clustering on the right was produced by applying <italic>WaveMAP</italic> to the embedded dataset and is very similar to those produced by <italic>WaveMAP</italic> on data without pre-processing.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig3-figsupp3.jpg"/></fig></fig-group><p>The number of clusters identified by <italic>WaveMAP</italic> is dependent on the resolution parameter for Louvain clustering. A principled way to choose this resolution parameter is to use the modularity score (a measure of how tightly interconnected the members of a cluster are) as the objective function to maximize. We chose a resolution parameter of 1.5 that maximized modularity score while ensuring that we did not overly fractionate the dataset (n &lt; 20 within a cluster; <xref ref-type="fig" rid="fig3">Figure 3A,B</xref>, and columns of <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>). 
Additional details are available in the &#8216;Parameter Choice&#8217; section of the Supplementary Information.</p><p>Louvain clustering with this resolution parameter of 1.5 identified eight clusters in total (<xref ref-type="fig" rid="fig3">Figure 3A</xref>). Note, using a slightly higher resolution parameter (2.0), a suboptimal solution in terms of modularity, led to seven clusters (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>). The advantage of Louvain clustering is that it is hierarchical and choosing a slightly larger resolution parameter will only merge clusters rather than generating entirely new cluster solutions. Here, we found that the higher resolution parameter merged two of the broad-spiking clusters &#9317; and &#9318; while keeping the rest of the clusters largely intact and more importantly, did not lead to material changes in the conclusions of analyses of physiology, decision-related dynamics, or laminar distribution described below. Finally, an alternative ensembled version of the Louvain clustering algorithm (ensemble clustering for graphs [ECG] <xref ref-type="bibr" rid="bib132">Poulin and Th&#233;berge, 2018</xref>), which requires setting no resolution parameter, produced a clustering almost exactly the same as our results (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1C</xref>).</p><p>To validate that <italic>WaveMAP</italic> finds a &#8216;real&#8217; representation of the data, we examined if a very different method could learn the same representation. We trained a gradient boosted decision tree classifier (with a softmax multi-class objective) on the exact same waveform data (vectors of 48 time points, 1.6 ms time length) passed to <italic>WaveMAP</italic> and used a test-train split with k-fold cross-validation applied to the training data. 
Hyperparameters were tuned with a 5-fold cross-validated grid search on the training data and final parameters shown in <xref ref-type="table" rid="table1">Table 1</xref>. After training, the classification was evaluated against the held-out test set (which was never seen in model training/tuning) and the accuracy, averaged over clusters, was 91%. <xref ref-type="fig" rid="fig3">Figure 3C</xref> shows the associated confusion matrix which contains accuracies for each class along the main diagonal and misclassification rates on the off-diagonals. Such successful classification at high levels of accuracy was only possible because there were &#8216;generalizable&#8217; clusterings of similar waveform shapes in the high-dimensional space revealed by UMAP.</p><table-wrap id="table1" position="float"><label>Table 1.</label><caption><title>Non-default model hyperparameters used.</title></caption><table frame="hsides" rules="groups"><thead><tr><th>Function</th><th>Function name</th><th>Parameters</th><th>Value</th></tr></thead><tbody><tr><td>UMAP Algorithm (Python)</td><td>umap.UMAP</td><td>n_neighbors min_dist random_state metric</td><td>20 0.1 42 &#8217;euclidean&#8217;</td></tr><tr><td>Louvain Clustering (Python)</td><td>cylouvain.best_partition</td><td>resolution</td><td>1.5</td></tr><tr><td>UMAP Gradient Boosted Decision Tree (Python)</td><td>xgboost.XGBClassifier</td><td>max_depth min_child_weight n_estimators learning_rate objective rand_state</td><td>4 2.5 100 0.3 &#8217;multi:softmax&#8217; 42</td></tr><tr><td>GMM Gradient Boosted Decision Tree (Python)</td><td>xgboost.XGBClassifier</td><td>max_depth min_child_weight n_estimators learning_rate objective seed</td><td>10 2.5 110 0.05 &#8217;multi:softmax&#8217; 42</td></tr><tr><td>8-Class GMM Gradient Boosted Decision Tree (Python)</td><td>xgboost.XGBClassifier</td><td>max_depth min_child_weight n_estimators learning_rate objective seed</td><td>2 1.5 100 0.3 &#8217;multi:softmax&#8217; 42</td></tr><tr><td>Gaussian 
Mixture Model (MATLAB)</td><td>fitgmdist</td><td>k start replicates statset(&#8217;MaxIter&#8217;)</td><td>4 &#8217;randsample&#8217; 50 200</td></tr><tr><td>DBSCAN (Python)</td><td>sklearn.cluster.DBSCAN</td><td>eps min_samples</td><td>3 15</td></tr></tbody></table></table-wrap><p>We find that cluster memberships found by <italic>WaveMAP</italic> are stable with respect to random seed when resolution parameter and n_neighbors parameter are fixed. This stability of <italic>WaveMAP</italic> clusters with respect to random seed is because much of the variability in UMAP layout is the result of the projection process (<xref ref-type="fig" rid="fig2">Figure 2B&#8211;v</xref>.a). Louvain clustering operates before this step on the high-dimensional graph generated by UMAP which is far less sensitive to the random seed. Thus, the actual layout of the projected clusters might differ subtly according to random seed, but the cluster memberships largely do not (see Supplementary Information and columns of <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>). Here, we fix the random seed purely for visual reproducibility purposes in the figure. Thus, across different random seeds and constant resolution, the clusters found by <italic>WaveMAP</italic> did not change because the graph construction was consistent across random seed at least on our dataset (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>).</p><p>We also found that <italic>WaveMAP</italic> was robust to data subsetting (randomly sampled subsets of the full dataset, see Supplementary Information <xref ref-type="bibr" rid="bib167">Tibshirani and Walther, 2005</xref>), unlike other clustering approaches (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, green, <xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1</xref>). 
We applied <italic>WaveMAP</italic> to 100 random subsets each from 10% to 90% of the full dataset and compared this to a &#8216;reference&#8217; clustering produced by the procedure on the full dataset. WaveMAP was consistent in both cluster number (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, red) and cluster membership (which waveforms were frequently &#8216;co-members&#8217; of the same cluster; <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, green).</p><p>Finally, our results were also robust to another standard approach to normalizing spike waveforms: normalization to trough depth. This method exhibited the same stability in cluster number (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2C</xref>), and also showed no differences in downstream analyses (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2D</xref>). Without amplitude normalization, interesting structure was lost (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2E</xref>) because UMAP likely attempts to explain both waveform amplitude and shape (shown as a smooth gradient in the trough to peak height difference <xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2F</xref>). In addition, common recommendations to apply PCA before non-linear dimensionality reduction were not as important for our waveform dataset, which was fairly low-dimensional (first three PC&#8217;s explained 94% variance). 
Projecting waveforms into a three-dimensional PC-space before <italic>WaveMAP</italic> produced a clustering very similar to data without this step (<xref ref-type="fig" rid="fig3s3">Figure 3&#8212;figure supplement 3</xref>).</p></sec><sec id="s2-4"><title>Traditional clustering methods with specified features sub-optimally capture waveform diversity</title><p>Our unsupervised approach (<xref ref-type="fig" rid="fig3">Figure 3</xref>) generates a stable clustering of waveforms. However, is our method better than the traditional approach of using specified features (<xref ref-type="bibr" rid="bib153">Snyder et al., 2016</xref>; <xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>; <xref ref-type="bibr" rid="bib76">Kaufman et al., 2010</xref>; <xref ref-type="bibr" rid="bib78">Kaufman et al., 2013</xref>; <xref ref-type="bibr" rid="bib113">Mitchell et al., 2007</xref>; <xref ref-type="bibr" rid="bib13">Barth&#243; et al., 2004</xref>; <xref ref-type="bibr" rid="bib111">Merchant et al., 2008</xref>; <xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>; <xref ref-type="bibr" rid="bib155">Song and McPeek, 2010</xref>; <xref ref-type="bibr" rid="bib152">Simons, 1978</xref>; <xref ref-type="bibr" rid="bib72">Johnston et al., 2009</xref>)? To compare how <italic>WaveMAP</italic> performs relative to traditional clustering methods built on specified features, we applied a Gaussian mixture model (GMM) to the three-dimensional space produced by commonly used waveform features. 
In accordance with previous work, the features we chose (<xref ref-type="fig" rid="fig4">Figure 4A</xref>) were action potential (AP) width of the spike (width in milliseconds of the full-width half minimum of the depolarization trough <xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>); the peak ratio, the ratio of pre-hyperpolarization peak (A1) to the post-hyperpolarization peak (A2) <xref ref-type="bibr" rid="bib12">Barry, 2015</xref>; and the trough to peak duration (time in ms from the depolarization trough to post-hyperpolarization peak) which is the most common feature used in analyses of extracellular recordings (<xref ref-type="bibr" rid="bib153">Snyder et al., 2016</xref>; <xref ref-type="bibr" rid="bib78">Kaufman et al., 2013</xref>; <xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>).</p><fig-group><fig id="fig4" position="float"><label>Figure 4.</label><caption><title>Gaussian mixture model clustering on specified features fails to capture the breadth of waveform diversity.</title><p>(<bold>A</bold>) The three EAP waveform landmarks used to generate the specified features passed to the GMM on a sample waveform. <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf1-v4.tif"/>is the pre-hyperpolarization peak (<bold>A1</bold>); <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf2-v4.tif"/>is the depolarization trough; and <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf3-v4.tif"/>is the post-hyperpolarization peak (<bold>A2</bold>). (<bold>B</bold>) A three-dimensional scatter plot with marginal distributions of waveforms and GMM classes on the three specified features in (<bold>A</bold>). Narrow-spiking (NS) are in red; broad-spiking (BS) in green; narrow-spiking triphasic (NST) in yellow; and broad-spiking triphasic (BST) types are in blue. 
Trough to peak was calculated as the time between <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf4-v4.tif"/>and <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf5-v4.tif"/>; peak ratio was determined as the ratio between the heights of <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf6-v4.tif"/>and <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf7-v4.tif"/>(A1/A2); and AP width was determined as the width of the depolarization trough <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf8-v4.tif"/>using the MLIB toolbox (<xref ref-type="bibr" rid="bib160">Stuttgen, 2019</xref>). (<bold>C</bold>) The optimal cluster number in the three-dimensional feature space in (<bold>B</bold>) was determined to be four clusters using the Bayesian information criterion (BIC) (<xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>). The number of clusters was chosen to be at the &#8216;elbow&#8217; of the BIC curve (green chevron). (<bold>D</bold>) A confusion matrix for a gradient boosted decision tree classifier with 5-fold cross-validation with hyperparameter optimization. The main diagonal contains the classification accuracy percentages across the four GMM clusters and the off-diagonal contains the misclassification rates. The average accuracy across classes was 78%. (<bold>E</bold>) The same scatter plot of normalized EAP waveforms in UMAP space as in <xref ref-type="fig" rid="fig3">Figure 3A</xref> but now colored by GMM category. <xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1</xref>: We show that <italic>WaveMAP</italic> clusterings are more consistent across random data subsets than either DBSCAN on t-SNE or a GMM on PCA. 
<xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2</xref>: GMMs fail to full capture the latent structure in the waveforms.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig4.jpg"/></fig><fig id="fig4s1" position="float" specific-use="child-fig"><label>Figure 4&#8212;figure supplement 1.</label><caption><title>WaveMAP clusterings are more consistent than either DBSCAN on t-SNE or a GMM on PCA.</title><p>(<bold>A</bold>) At top, DBSCAN (eps = 3, n_neighbors = 15) was applied to t-SNE&#8217;s projected space (perplexity = 30) over the full dataset producing 10 clusters.&#160;Parameters were chosen to produce similar structure to <italic>WaveMAP</italic> to facilitate comparison. At bottom, a Gaussian mixture model (n_components = 4, replicates = 25) is applied to the three-dimensional projected space (94% variance explained) produced by the first three principal components of the full dataset (GMM on PCA). The number of clusters chosen was by selecting the value at the elbow (green arrow) of a BIC &#177; S.D. vs. number of clusters plot shown in the inset. (<bold>B</bold>) Adjusted mutual information scores (AMI; mean &#177; S.E.M.) for 100 random sample clusterings of both <italic>WaveMAP</italic>, DBSCAN on t-SNE, and GMM on PCA. Standard error of the mean bars are smaller than marker size. The AMI was calculated by constructing a &#8216;reference&#8217; clustering by applying the respective method to the full dataset and then compared to the clusterings produced by random subsamples. (<bold>C</bold>) A jittered strip plot showing the AMI for 100 random subsets using each method. Subsets were randomly sampled from 40% (left) and 90% (right) of the full dataset. 
Gray boxes correspond to the data points surrounded by gray boxes in (<bold>B</bold>).</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig4-figsupp1.jpg"/></fig><fig id="fig4s2" position="float" specific-use="child-fig"><label>Figure 4&#8212;figure supplement 2.</label><caption><title>Comparison of GMM and UMAP in the constructed feature space.</title><p>(<bold>A</bold>) Three views of the eight <italic>WaveMAP</italic> clusters shown in the constructed feature space.&#160;The clusters maintain some structure but are largely mixed and linearly inseparable. (<bold>B</bold>) A GMM instantiated with eight clusters in the constructed feature space of <xref ref-type="fig" rid="fig4">Figure 4B</xref>. (<bold>C</bold>) Confusion matrix for a gradient boosted decision tree classifier with the same hyperparameters as the one trained on four GMM classes (see hyperparameters in <xref ref-type="table" rid="table1">Table 1</xref>). Numbers listed are in percent accuracy on the main diagonal and misclassification rate percentage on the off-diagonals against held-out data. (<bold>D</bold>) Each cluster of waveforms in the eight class GMM with average waveforms in black. (<bold>E</bold>) Both <italic>WaveMAP</italic> and a GMM on features were used on the full dataset to generate results of various cluster number and a gradient boosted decision tree (hyperparameters optimized to the four-class GMM) was trained on each. Shown is the classifier accuracy (mean &#177; S.D.) across stratified k-folds (k = 5) and various cluster number from 2 to 16. In dark blue, we generated <italic>WaveMAP</italic> mappings of different cluster number by changing the n_neighbors parameter associated with UMAP; in light blue, we generated <italic>WaveMAP</italic> mappings of various cluster number by changing resolution associated with Louvain. 
In red, we changed the n_components for the Gaussian mixture model.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig4-figsupp2.jpg"/></fig></fig-group><p>The result of the GMM applied to these three measures is shown in <xref ref-type="fig" rid="fig4">Figure 4B</xref>. This method identified four waveform clusters that roughly separated into broad-spiking (BS, &#8764;33%, n = 208), narrow-spiking (NS, &#8764;43%, n = 269), broad-spiking triphasic (BST, &#8764;9%, n = 55), and narrow-spiking triphasic (NST, &#8764;15%, n = 93) (<xref ref-type="fig" rid="fig4">Figure 4B</xref>). Triphasic waveforms, thought to be neurons with myelinated axons or neurites (<xref ref-type="bibr" rid="bib12">Barry, 2015</xref>; <xref ref-type="bibr" rid="bib139">Robbins et al., 2013</xref>; <xref ref-type="bibr" rid="bib40">Deligkaris et al., 2016</xref>; <xref ref-type="bibr" rid="bib8">Bakkum et al., 2013</xref>; <xref ref-type="bibr" rid="bib161">Sun et al., 2021</xref>), contain an initial positive spike before the trough and can be identified by large peak ratios (<xref ref-type="fig" rid="fig4">Figure 4A</xref>). These GMM clusters are similar to those obtained from other clusterings of EAP&#8217;s in macaque cortex (<xref ref-type="bibr" rid="bib61">Gur et al., 1999</xref>; <xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>). We selected four clusters by examining the Bayesian information citerion (BIC) statistic as a function of the number of clusters and identified the cluster number at the elbow (green chevron in <xref ref-type="fig" rid="fig4">Figure 4C</xref>).</p><p>To compare the generalizability of this representation with the representation provided by UMAP, we trained the same decision tree classifier on the waveform data (after separate hyperparameter tuning, <xref ref-type="table" rid="table1">Table 1</xref>) but this time using the four GMM classes as target labels. 
After training, the accuracy across all four classes averaged &#8764;78% with no classification accuracy over 95% and misclassifications between every class (<xref ref-type="fig" rid="fig4">Figure 4D</xref>). The classifier trained on specified features under-performed the classifier trained on the whole waveform found by <italic>WaveMAP</italic>. In <italic>WaveMAP</italic>, the individual classification accuracy of most classes exceeded 95% with few misclassifications between groups even though there were double the number of clusters. This result suggests that the clusters based on specified features are less differentiable than <italic>WaveMAP</italic> clusters even when a much lower cluster number is considered.</p><p>This deficit can be understood as an inability of the GMM to fully capture the latent structure of the data. If we examine the gray data point shadows (<xref ref-type="fig" rid="fig4">Figure 4B</xref>), no features contain clear clusters and neither do they contain Gaussian distributions which is an assumption of the GMM model. Examining the marginal distributions in <xref ref-type="fig" rid="fig4">Figure 4B</xref>, none of the features induce a clear separability between the clusters alone or in conjunction. Furthermore, the reproducible clusters found by <italic>WaveMAP</italic> are linearly inseparable in the feature space of the three GMM features (<xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2A</xref>). Note, this is not an artifact of using a lower cluster number in the GMM as opposed to the eight found by <italic>WaveMAP</italic>. 
Even if the GMM is instantiated with eight clusters (<xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2B</xref>), a classifier is still unable to generalize this clustering with even modest accuracy (average of 56% across clusters; <xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2C</xref>) even if the waveforms shapes found by the GMM with eight clusters seem somewhat sensible (<xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2D</xref>). In fact, across all cluster numbers (n_components from 2 to 16), a classifier tuned <italic>for the GMM</italic> performed more poorly on the GMM labels than a <italic>WaveMAP</italic> projection with the same number of clusters (<xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2E</xref>, in red). Tuning <italic>WaveMAP</italic> parameters that induce different cluster numbers, whether n_neighbors (in dark blue) or resolution (in light blue), had little effect on classifier performance (<xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2E</xref>, in blues). <italic>WaveMAP</italic> yielded mappings that were more generalizable than a GMM on features across every number of clusters and both parameters investigated. Thus, it is a deficit of the GMM on constructed feature-based approach to capture the full diversity of waveforms, especially at high cluster number, and not a peculiarity of the model parameters chosen or number of clusters induced.</p><p>We also investigated the representation of specified features in the projected UMAP space. 
We color coded the waveforms in UMAP, in <xref ref-type="fig" rid="fig5s1">Figure 5&#8212;figure supplement 1</xref>, according to each point&#8217;s feature values using the same features as in <xref ref-type="fig" rid="fig4">Figure 4</xref> (<xref ref-type="fig" rid="fig5s1">Figure 5&#8212;figure supplement 1A</xref>): AP width (<xref ref-type="fig" rid="fig5s1">Figure 5&#8212;figure supplement 1B</xref>), trough to peak duration (<xref ref-type="fig" rid="fig5s1">Figure 5&#8212;figure supplement 1C</xref>), and peak ratio (<xref ref-type="fig" rid="fig5s1">Figure 5&#8212;figure supplement 1D</xref>). We find that <italic>WaveMAP</italic> implicitly captures each of these specified features shown as a smooth gradient of values. Our method also exposes the correlation between certain specified features: the gradient between trough to peak duration and AP width points point roughly in the same direction so thus both features are highly correlated. This correlation between features exposes their redundancy and is another reason why traditional approaches fail to capture the full diversity of waveform shapes.</p><p>To obtain a clearer picture of how <italic>WaveMAP</italic> captures latent structure missed by specified features, we color the points in UMAP space by their GMM cluster identity in <xref ref-type="fig" rid="fig4">Figure 4E</xref>. Here, <italic>WaveMAP</italic> is able to recapitulate the same structure observed by specified features as a gradient from broad- to narrow-spiking along the UMAP-1 direction. Our technique also captures the transition from triphasic to biphasic along the UMAP-2 direction. <italic>WaveMAP</italic> is also able to find clusters that occupy an intermediate identity between GMM classes. For instance, <italic>WaveMAP</italic> cluster &#9313; (<xref ref-type="fig" rid="fig3">Figure 3A</xref>) is nearly equal parts broad- and narrow-spiking in the GMM clustering (<xref ref-type="fig" rid="fig4">Figure 4E</xref>). 
If a GMM were used, &#9313; would be split between two classes despite it having a distinct waveform shape characterized by a small pre-hyperpolarization peak, a moderate post-hyperpolarization peak, and relatively constant repolarization slope.</p></sec><sec id="s2-5"><title><italic>WaveMAP</italic> interpretably recapitulates and expands upon known waveform features</title><p>We have established that <italic>WaveMAP</italic> has the ability to discover extracellular waveform clusters, but a common contention with such non-linear methods is that they are uninterpretable. Here, using an interpretable machine learning approach, we show that <italic>WaveMAP</italic> produces sensible results (<xref ref-type="bibr" rid="bib114">Molnar, 2020</xref>; <xref ref-type="bibr" rid="bib6">Azodi et al., 2020</xref>). To identify the features our algorithm is paying attention to, we first computed the inverse mapping of the UMAP transform to probe the projected space in a systematic way. Second, we leverage the gradient boosted decision tree classifier in <xref ref-type="fig" rid="fig3">Figure 3C</xref> and used a decision tree implementation (path-dependent TreeSHAP <xref ref-type="bibr" rid="bib96">Lundberg et al., 2018</xref>) of SHapley Additive exPlanations (SHAP values <xref ref-type="bibr" rid="bib98">Lundberg and Lee, 2017</xref>; <xref ref-type="bibr" rid="bib97">Lundberg et al., 2020</xref>) to reveal what waveform features are implicitly used to differentiate clusters.</p><p>To quantify the differences between Louvain clusters, we applied a grid of &#8216;test points&#8217; to the UMAP projected space (<xref ref-type="fig" rid="fig5">Figure 5A</xref>, top) and inverted the transform at each location; each of these test points is a coordinate on a grid (black x&#8217;s) and shows the waveform associated with every point in the projected space (<xref ref-type="fig" rid="fig5">Figure 5A</xref>, bottom). 
On the bottom of <xref ref-type="fig" rid="fig5">Figure 5A</xref> is shown the waveform that corresponds to each point in UMAP space color-coded to the nearest cluster or to gray if there were no nearby clusters. As UMAP-1 increases, there is a smooth transition in the sign of the inflection of the repolarization slope (the second derivative) from negative to positive (slow to fast repolarization rate). That is, the post-hyperpolarization peak becomes more sharp as we increase in the UMAP-1 direction. As UMAP-2 increases, we see a widening of the post-hyperpolarization slope distinct from the change in its inflection (UMAP-1). These two UMAP dimensions recapitulate the known importance of hyperpolarization properties in clustering waveforms. Both hyperpolarization rate (proportional to trough to peak width) and hyperpolarization slope inflection (proportional to repolarization time) are separate but highly informative properties (<xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>; <xref ref-type="bibr" rid="bib4">Ardid et al., 2015</xref>). Furthermore, since repolarization rate and post-hyperpolarization width associate with different UMAP dimensions, this implies that these two processes are somewhat independent factors shaping the waveform. Repolarization rates are goverened by potassium channel dynamics and may play an important in waveform shape (<xref ref-type="bibr" rid="bib154">Soares et al., 2017</xref>). 
Thus, <italic>WaveMAP</italic> not only finds an interpretable and smoothly varying low-dimensional space it also offers biological insights; in this case, how cell types might differ according to channel protein expression and dynamics.</p><fig-group><fig id="fig5" position="float"><label>Figure 5.</label><caption><title>WaveMAP provides interpretable representations that both validate and extend known and unknown features importances.</title><p>(<bold>A</bold>) <italic>WaveMAP</italic> applied to the EAP&#8217;s as in <xref ref-type="fig" rid="fig3">Figure 3A</xref> but overlaid with a grid of test points (black x&#8217;s, top) spanning the embedded space.&#160;At bottom, the inverse UMAP transform is used to show the predicted waveform at each test point. For each x above, the predicted waveform is shown, plotted, and assigned the color of the nearest cluster or in gray if no cluster is nearby. Note that there exists instability in the waveform shape (see waveforms at corners) as test points leave the learned embedded space. (<bold>B</bold>) The mean absolute SHAP values for 10 time points along all waveforms subdivided according to the SHAP values contributed by each <italic>WaveMAP</italic> cluster. These SHAP values were informed by applying path-dependent TreeSHAP to a gradient boosted decision tree classifier trained on the waveforms with the <italic>WaveMAP</italic> clusters as labels. In the inset, all waveforms are shown and in gold are shown the time points for which the SHAP values are shown on the left. Each vertical line is such that the most opaque line contains the greatest SHAP value across <italic>WaveMAP</italic> clusters; the least opaque, the smallest SHAP value. (<bold>C</bold>) Each averaged <italic>WaveMAP</italic> waveform cluster is shown with the three time points containing the greatest SHAP values for each cluster individually. 
As before, the SHAP value at each time point is proportional to the opacity of the gray vertical line also shown as a bar graph at left. <xref ref-type="fig" rid="fig5s1">Figure 5&#8212;figure supplement 1</xref>: <italic>WaveMAP</italic> implicitly captures waveform features (such as trough to peak or AP width) without the need for prior specification.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig5.jpg"/></fig><fig id="fig5s1" position="float" specific-use="child-fig"><label>Figure 5&#8212;figure supplement 1.</label><caption><title>WaveMAP implicitly captures waveform features without the need for specification.</title><p>(<bold>A</bold>) Three waveform shape features used in traditional clustering approaches. The three EAP waveform landmarks used to generate the specified features passed to the GMM on a sample waveform.</p><p> <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf9-v4.tif"/>is the pre-hyperpolarization peak (<bold>A1</bold>); <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf10-v4.tif"/>is the depolarization trough; and <inline-graphic mime-subtype="tiff" mimetype="image" xlink:href="elife-67490-inf11-v4.tif"/>is the post-hyperpolarization peak (<bold>A2</bold>). AP width is the distance in time between the falling and rising phase of the depolarization trough at its full-width half minimum. The trough to peak duration is the distance between the minimum of the depolarization trough and the peak of the post-hyperpolarization peak. The peak ratio is the height (above zero) of the pre-hyperpolarization peak over the height (again, above zero) of the post-hyperpolarization peak. The same diagram as in <xref ref-type="fig" rid="fig4">Figure 4A</xref> but repeated here. 
(<bold>B, C, D</bold>) The waveform data points in the projected UMAP space and color coded according to their AP width, trough to peak duration, and peak ratio, respectively.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig5-figsupp1.jpg"/></fig></fig-group><p>In <xref ref-type="fig" rid="fig5">Figure 5B</xref>, we made use of SHAP values to identify which aspects of waveform shape the gradient boosted decision tree classifier utilizes in assigning what waveform to which cluster (<xref ref-type="bibr" rid="bib98">Lundberg and Lee, 2017</xref>; <xref ref-type="bibr" rid="bib97">Lundberg et al., 2020</xref>). SHAP values build off of the game theoretic quantity of Shapley values (<xref ref-type="bibr" rid="bib150">Shapley, 1988</xref>; <xref ref-type="bibr" rid="bib159">&#352;trumbelj and Kononenko, 2014</xref>),&#160;which poses that each feature (point in time along the waveform) is of variable importance in influencing the classifier to decide whether the data point belongs to a specific class or not. Operationally, SHAP values are calculated by examining the change in classifier performance as each feature is obscured (the waveform&#8217;s amplitude at each time point in this case), one-by-one (<xref ref-type="bibr" rid="bib98">Lundberg and Lee, 2017</xref>). <xref ref-type="fig" rid="fig5">Figure 5B</xref> shows the top-10 time points in terms of mean absolute SHAP value (colloquially called &#8216;SHAP value&#8217;) and their location. It is important to note that not every time point is equally informative for distinguishing every cluster individually and thus each bar is subdivided into the mean absolute SHAP value contribution of the eight constituent waveform classes. 
For instance, the 0.7 ms location is highly informative for cluster &#9316; and the 0.3 ms point is highly informative for cluster &#9318;&#160;(<xref ref-type="fig" rid="fig5">Figure 5C</xref>).</p><p>In the inset is shown all waveforms along with each of the top ten time points (in gold) with higher SHAP value shown with more opacity. The time points with highest SHAP value tend to cluster around two different locations giving us an intuition for which locations are most informative for telling apart the Louvain clusters. For instance, the 0.5 to 0.65 ms region contains high variability amongst waveforms and is important in separating out broad- from narrow-spiking clusters. This region roughly contains the post-hyperpolarization peak which is a feature of known importance and incorporated into nearly every study of EAP waveform shape (see <xref ref-type="table" rid="table1">Table 1</xref> in <xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>). Similarly, SHAP values implicate the region around 0.3 ms to 0.4 ms as time points that are also of importance and these correspond to the pre-hyperpolarization peak which is notably able to partition out triphasic waveforms (<xref ref-type="bibr" rid="bib12">Barry, 2015</xref>). Importance is also placed on the location at 0.6 ms corresponding to the inflection point which is similarly noted as being informative (<xref ref-type="bibr" rid="bib170">Trainito et al., 2019</xref>; <xref ref-type="bibr" rid="bib10">Banaie Boroujeni et al., 2021</xref>). These methods also implicate other regions of interest that have not been previously noted in the literature to the best of our knowledge: two other locations are highlighted farther along the waveform at 1.1 and 1.27 ms and are important for differentiating &#9319; and &#9312; from the other waveforms. 
This result suggests that using only up to 1.0 ms or less of the waveform may obscure diversity.</p><p>In <xref ref-type="fig" rid="fig5">Figure 5C</xref>, we show the three locations that are most informative for delineating a specific cluster; these appear as gray lines with their opacity proportional to their SHAP importance. These individually&#160;informative features often do align with those identified as globally-informative but do so with cluster-specific weights. Put another way, not every time point is equally informative for identifying waveforms individually and these &#8216;most informative&#8217; parts of each waveform do&#160;not always perfectly align with globally&#160;informative features. In summary, <italic>WaveMAP</italic> independently and sensibly arrived at a more nuanced incorporation of the very same features identified in previous work&#8212;and several novel ones&#8212;using a completely unsupervised framework which obviated the need to specify waveform features.</p><p>In the second half of the paper, we investigate whether these clusters have distinct physiological (in terms of firing rate), functional, and laminar distribution properties which could give credence that <italic>WaveMAP</italic> clusters connect to cell types.</p></sec><sec id="s2-6"><title><italic>WaveMAP</italic> clusters have distinct physiological properties</title><p>A defining aspect of cell types is that they vary in their physiology and especially firing rate properties (<xref ref-type="bibr" rid="bib119">Mountcastle et al., 1969</xref>; <xref ref-type="bibr" rid="bib105">McCormick et al., 1985</xref>; <xref ref-type="bibr" rid="bib32">Connors and Gutnick, 1990</xref>; <xref ref-type="bibr" rid="bib126">Nowak et al., 2003</xref>; <xref ref-type="bibr" rid="bib31">Connors et al., 1982</xref>; <xref ref-type="bibr" rid="bib34">Contreras, 2004</xref>). 
However, these neuronal characterizations via waveform ex vivo are not always conserved when the same waveform types are observed in vivo during behavior (<xref ref-type="bibr" rid="bib158">Steriade, 2004</xref>; <xref ref-type="bibr" rid="bib157">Steriade et al., 1998</xref>). To connect our waveform clusters to physiological cell types in vivo, we identified each cluster&#8217;s firing rate properties. We performed several analyses using the firing rate (FR) in spikes per second (spikes/s) for each cluster during the decision-making task described in <xref ref-type="fig" rid="fig1">Figure 1</xref>.</p><p>The trial-averaged FRs are aligned to stimulus onset (stim-aligned) and separated into preferred (PREF, solid trace) or non-preferred (NONPREF, dashed trace) reach direction trials. This is shown for both broad- (<xref ref-type="fig" rid="fig6">Figure 6A</xref>) and narrow-spiking (<xref ref-type="fig" rid="fig6">Figure 6B</xref>) clusters. A neuron&#8217;s preferred direction (right or left) was determined as the reach direction in which it had a higher FR on average in the 100 ms time period before movement onset.</p><fig-group><fig id="fig6" position="float"><label>Figure 6.</label><caption><title>UMAP clusters exhibit distinct physiological properties.</title><p>(<bold>A</bold>) Stimulus-aligned trial-averaged firing rate (FR; spikes/s) activity in PMd for broad-spiking <italic>WaveMAP</italic> clusters.&#160;The traces shown are separated into trials for PREF direction reaches (solid traces) and NONPREF direction reaches (dashed traces) and across the corresponding <italic>WaveMAP</italic> clusters. Shaded regions correspond to bootstrapped standard error of the mean. Dashed vertical line is stimulus-onset time. (<bold>B</bold>) The same plots as in (<bold>A</bold>) but for narrow-spiking <italic>WaveMAP</italic> clusters. (<bold>C</bold>) Baseline median FR &#177; S.E.M. for the neurons in the eight different classes. 
Baselines were taken as the average FR from 200 ms of recording before checkerboard stimulus onset. (<bold>D</bold>) Median maximum FR &#177; S.E.M. for the neurons in the eight different clusters. This was calculated by taking the median of the maximum FR for each neuron across the entire trial. (<bold>E</bold>) Median FR range &#177; S.E.M. calculated as the median difference, per neuron, between its baseline and max FR. ---- p &lt; 0.05; ---- p &lt; 0.01; ---- p &lt; 0.005; Mann-Whitney <italic>U</italic> test, FDR adjusted. <xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1</xref>: GMM clusters are less physiologically distinguishable than <italic>WaveMAP</italic> clusters.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig6.jpg"/></fig><fig id="fig6s1" position="float" specific-use="child-fig"><label>Figure 6&#8212;figure supplement 1.</label><caption><title>GMM clusters are less physiologically distinguishable than <italic>WaveMAP</italic> clusters.</title><p>(<bold>A</bold>) Stimulus-aligned trial-averaged firing rate activity in PMd for GMM clusters.&#160;As in <xref ref-type="fig" rid="fig6">Figure 6</xref>, the traces are separated into PREF and NONPREF trials with solid and dashed lines respectively. Shaded regions correspond to bootstrapped standard error of the mean (S.E.M.). The dashed vertical line denotes the stimulus-onset time. (<bold>B</bold>) Baseline median firing rates (FR) &#177; S.E.M. for the four GMM clusters. Baselines were calculated as the average firing rate during the first 200 ms of the trial. (<bold>C</bold>) Median maximum FRs &#177; S.E.M. for the neurons in the four GMM clusters. This was caculated by taking the median of the maximum FR for each neuron across the entire trial. (<bold>D</bold>) Median FR range &#177; S.E.M. calculated as the median difference, per neuron, between its baseline and max FR. 
---- p &lt; 0.05; ---- p &lt; 0.01; ---- p &lt; 0.005;
and were not significantly different from one another but all were significantly different from &#9312; and &#9316; (p = 0.04, p = 2.8e-4, p = 2.8e-7, p = 4.9e-5, respectively, Mann-Whitney <italic>U</italic> test, FDR adjusted; see <xref ref-type="fig" rid="fig6">Figure 6C</xref>).</p></sec><sec id="s2-6-2"><title>Maximum FR</title><p>A second important property of cell types is their maximum FR (<xref ref-type="bibr" rid="bib119">Mountcastle et al., 1969</xref>; <xref ref-type="bibr" rid="bib105">McCormick et al., 1985</xref>; <xref ref-type="bibr" rid="bib32">Connors and Gutnick, 1990</xref>). We estimated the maximum FR for a cluster as the median of the maximum FR of neurons in the cluster in a 1200 ms period aligned to movement onset (800 ms before and 400 ms after movement onset; <xref ref-type="fig" rid="fig6">Figure 6D</xref>). In addition to significant differences in baseline FR, broad- vs. narrow-spiking clusters showed a significant difference in max FR (p = 1.60e-5, Mann-Whitney <italic>U</italic> test). Broad-spiking clusters were fairly homogeneous with low median max FR (24.3 &#177; 1.0, median &#177; bootstrap S.E.) and no significant differences between distributions. In contrast, there was significant heterogeneity in the FR&#8217;s of narrow-spiking neurons: three clusters (&#9312;, &#9314;, and &#9316;) had uniformly higher max FR (33.1 &#177; 1.1, median &#177; bootstrap S.E.) while two others (&#9313; and &#9315;) were uniformly lower in max FR (23.0 &#177; 1.4, median &#177; bootstrap S.E.) and were comparable to the broad-spiking clusters. 
Nearly each of the higher max FR narrow-spiking clusters were significantly different than each of the lower max FR clusters (all pairwise relationships p &lt; 0.001 except &#9314; to &#9315; which was p = 0.007, Mann-Whitney <italic>U</italic> test, FDR adjusted).</p></sec><sec id="s2-6-3"><title>FR range</title><p>Many neurons, especially inhibitory types, display a sharp increase in FR and also span a wide range during behavior (<xref ref-type="bibr" rid="bib76">Kaufman et al., 2010</xref>; <xref ref-type="bibr" rid="bib78">Kaufman et al., 2013</xref>; <xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>; <xref ref-type="bibr" rid="bib72">Johnston et al., 2009</xref>; <xref ref-type="bibr" rid="bib69">Hussar and Pasternak, 2009</xref>). To examine this change over the course of a trial, we took the median difference across trials between the max FR and baseline FR per neuron to calculate the FR range. We again found the group difference between broad- and narrow-spiking clusters to be significant (p = 0.0002, Mann-Whitney <italic>U</italic> test). Each broad-spiking cluster (&#9317;, &#9318;, and &#9319;) had a median increase of around 10.8 spikes/s (10.8 &#177; 0.8, 10.7 &#177; 2.3, and 10.9 &#177; 1.9 spikes/s respectively, median &#177; bootstrap S.E.) and each was nearly identical in FR range differing by less than 0.2 spikes/s. In contrast, the narrow-spiking clusters showed more variation in their FR range&#8212;similar to the pattern observed for max FR. &#9312;, &#9314;, and &#9316; had a large FR range (20.3 &#177; 1.1 spikes/s, median &#177; bootstrap S.E.) and the clusters &#9314; and &#9315; had a relatively smaller FR range (13.4 &#177; 1.3 spikes/s, median &#177; bootstrap S.E.). 
These results demonstrate that some narrow-spiking clusters, in addition to having high baseline FR, highly modulated their FR over the course of a behavioral trial.</p><p>Such physiological heterogeneity in narrow-spiking cells has been noted before (<xref ref-type="bibr" rid="bib4">Ardid et al., 2015</xref>; <xref ref-type="bibr" rid="bib10">Banaie Boroujeni et al., 2021</xref>; <xref ref-type="bibr" rid="bib135">Quirk et al., 2009</xref>) and in some cases, attributed to different subclasses of a single inhibitory cell type (<xref ref-type="bibr" rid="bib134">Povysheva et al., 2013</xref>; <xref ref-type="bibr" rid="bib182">Zaitsev et al., 2009</xref>). Other work also strongly suggests that narrow-spiking cells contain excitatory neurons with distinct FR properties contributing to this diversity (<xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>; <xref ref-type="bibr" rid="bib127">Onorato et al., 2020</xref>).</p><p>Furthermore, if <italic>WaveMAP</italic> has truly arrived at a closer delineation of underlying cell types compared to previous methods, it should produce a &#8216;better&#8217; clustering of physiological properties beyond just a better clustering of waveform shape. To address this issue, we calculate the same firing rate traces and physiological properties as in <xref ref-type="fig" rid="fig6">Figure 6</xref> but with the GMM clusters (<xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1</xref>). While the FR traces maintain the same trends (BS does not increase its FR prior to the split into PREF and NONPREF while NS does; compare to <italic>WaveMAP</italic> broad-spiking vs. narrow-spiking clusters respectively), much of the significant differences between clusters is lost across all physiological measures even though fewer groups are compared (<xref ref-type="fig" rid="fig6s1">Figure 6&#8212;figure supplement 1B,C and D</xref>). 
We also quantitatively estimate these differences by calculating the effect sizes (Cohen&#8217;s <italic>f<sup>2</sup></italic>) across the <italic>WaveMAP</italic> and GMM clusterings with a one-way ANOVA. The effect size was larger for <italic>WaveMAP</italic> vs. GMM clustering respectively for every physiological property: baseline FR (0.070 vs. 0.013), maximum FR (0.035 vs. 0.011), and FR range (0.055 vs. 0.034).</p></sec></sec><sec id="s2-7"><title><italic>WaveMAP</italic> clusters have distinct decision-related dynamics</title><p>Our analysis in the previous section showed that there is considerable heterogeneity in the physiological properties of the <italic>WaveMAP</italic> clusters. Are these putative cell types also functionally different? Prior literature argues that neuronal cell types have distinct functional roles during cortical computation with precise timing. For instance, studies of macaque premotor (<xref ref-type="bibr" rid="bib155">Song and McPeek, 2010</xref>), inferotemporal (IT) (<xref ref-type="bibr" rid="bib120">Mruczek and Sheinberg, 2012</xref>), and frontal eye field (FEF) (<xref ref-type="bibr" rid="bib46">Ding and Gold, 2012</xref>) areas show differences in decision-related functional properties: between broad- and narrow-spiking neurons, narrow-spiking neurons exhibit choice-selectivity earlier than broad-spiking neurons. In the mouse, specific aspects of behavior are directly linked with inhibitory cell types (<xref ref-type="bibr" rid="bib130">Pinto and Dan, 2015</xref>; <xref ref-type="bibr" rid="bib51">Estebanez et al., 2017</xref>). Here, we examine the functional properties of each cluster based on two inferred statistics: choice-related dynamics and discrimination time.</p><sec id="s2-7-1"><title>Choice-related dynamics</title><p>The first property we assessed for these <italic>WaveMAP</italic> clusters was the dynamics of the choice-selective signal. 
The neural prediction made by computational models of decision-making (for neurons that covary with an evolving decision) is the build-up of average neural activity in favor of a choice is faster for easier compared to harder color coherences (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>; <xref ref-type="bibr" rid="bib46">Ding and Gold, 2012</xref>; <xref ref-type="bibr" rid="bib140">Roitman and Shadlen, 2002</xref>). Build-up activity is measured by analyzing the rate of change of choice-selective activity vs. time. We therefore examined the differences in averaged stimulus-aligned choice-selectivity signals (defined as |left - right|) for different checkerboard color coherences for each cluster.</p><p>In <xref ref-type="fig" rid="fig7">Figure 7A and B</xref>, we show average choice-selectivity signals across the seven color coherence levels (<xref ref-type="fig" rid="fig7">Figure 7A</xref>, legend) for an example broad- (&#9317;) and narrow-spiking cluster (&#9312;). For &#9317; (<xref ref-type="fig" rid="fig7">Figure 7A</xref>), easier stimuli (higher coherence) only led to modest increases in the rate at which the choice selectivity signal increases. In contrast, &#9312; (<xref ref-type="fig" rid="fig7">Figure 7B</xref>) shows faster rates for the choice-selective signal as a function of coherence. We summarized these effects by measuring the rate of change for the choice-selective signal between 175 and 325 ms for stimulus-aligned trials in each coherence condition (dashed lines in <xref ref-type="fig" rid="fig7">Figure 7A,B</xref>). This rate of rise for the choice-selective signal (spikes/s/s) vs. coherence is shown for broad- (<xref ref-type="fig" rid="fig7">Figure 7C</xref>) and narrow-spiking (<xref ref-type="fig" rid="fig7">Figure 7D</xref>) clusters. 
The broad-spiking clusters demonstrate fairly similar coherence-dependent changes with each cluster being somewhat indistinguishable and only demonstrating a modest increase with respect to coherence. In contrast, the narrow-spiking clusters show a diversity of responses with &#9312; and &#9316; demonstrating a stronger dependence of choice-related dynamics on coherence compared to the other three narrow-spiking clusters whose responses were more similar to those of broad-spiking neurons.</p><fig id="fig7" position="float"><label>Figure 7.</label><caption><title>UMAP clusters exhibit distinct functional properties.</title><p>(<bold>A</bold>) Average firing rate (FR) over time for &#9317; (used as a sample broad-spiking cluster) across trials of different color coherences.&#160;The gray-dashed lines indicate the linear regression lines used to calculate the FR rate of rise. (<bold>B</bold>) Average FR over time for &#9312; (used as a sample narrow-spiking cluster) across different color coherences. (<bold>C</bold>) FR rate of rise vs. color coherence for broad- and (<bold>D</bold>) narrow-spiking clusters. Error bars correspond to standard error of the mean across trials. (<bold>E</bold>) Bootstrapped median color coherence slope is shown with the bootstrapped standard error of the median for each cluster on a per-neuron basis. Coherence slope is a linear regression of the cluster-specific lines in the previous plots <bold>C</bold> and <bold>D</bold>. (<bold>F</bold>) Median bootstrapped discrimination time for each cluster with error bars as the bootstrapped standard error of the median. Discrimination time was calculated as the amount of time after checkerboard appearance at which the choice-selective signal could be differentiated from the baseline FR (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). 
dotted&#160;line&#160;p &lt; 0.05; dashed&#160;line&#160;p &lt; 0.01; solid&#160;line&#160;p &lt; 0.005; Mann-Whitney <italic>U</italic> test, FDR adjusted.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig7.jpg"/></fig><p>We further summarized these plots by measuring the dependence of the rate of rise of the choice-selective signal as a function of coherence measured as the slope of a linear regression performed on the rate of rise vs. color coherence for each cluster (<xref ref-type="fig" rid="fig7">Figure 7E</xref>). The coherence slope for broad-spiking clusters was moderate and similar to &#9313;, &#9314;, and &#9315; while the coherence slope for &#9312; and &#9316; was steeper. Consistent with <xref ref-type="fig" rid="fig7">Figure 7C and D</xref>, the choice selective signal for &#9312; and &#9316; showed the strongest dependence on stimulus coherence.</p></sec><sec id="s2-7-2"><title>Discrimination time</title><p>The second property that we calculated was the discrimination time for clusters which is defined as the first time in which the choice-selective signal (again defined as |left - right|) departed from the FR of the hold period. We calculated the discrimination time on a neuron-by-neuron basis by computing the first time point in which the difference in FR for the two choices was significantly different from baseline using a bootstrap test (at least 25 successive time points significantly different from baseline FR corrected for multiple comparisons <xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). Discrimination time for broad-spiking clusters (255 &#177; 94 ms, median &#177; bootstrap S.E.) was significantly later than narrow-spiking clusters (224 &#177; 89 ms, p &lt; 0.005, median &#177; bootstrap S.E., Mann-Whitney <italic>U</italic> test). Clusters &#9312; and &#9316;, with the highest max FRs (34.0 &#177; 1.4 and 33.0 &#177; 1.8 spikes/s, median &#177; S.E.) 
and most strongly modulated by coherence, had the fastest discrimination times as well (200.0 &#177; 4.9 and 198.5 &#177; 4.9 ms, median &#177; S.E.).</p><p>Together the analysis of choice-related dynamics and discrimination time showed that there is considerable heterogeneity in the properties of narrow-spiking neuron types. Not all narrow-spiking neurons are faster than broad-spiking neurons and choice-selectivity signals have similar dynamics for many broad-spiking and narrow-spiking neurons. &#9312; and &#9316; have the fastest discrimination times and strongest choice dynamics. In contrast, the broad-spiking neurons have uniformly slower discrimination times and weaker choice-related dynamics.</p></sec></sec><sec id="s2-8"><title><italic>WaveMAP</italic> clusters contain distinct laminar distributions</title><p>In addition to having certain physiological properties and functional roles, numerous studies have shown that cell types across phylogeny, verified by single-cell transcriptomics, are defined by distinct patterns of laminar distribution in cortex (<xref ref-type="bibr" rid="bib66">Hodge et al., 2019</xref>; <xref ref-type="bibr" rid="bib169">Tosches et al., 2018</xref>). Here, we examined the laminar distributions of <italic>WaveMAP</italic> clusters and compared them to laminar distributions of GMM clusters. The number of waveforms from each cluster was counted at each of sixteen U-probe channels separately. These channels were equidistantly spaced every 0.15 mm between 0.0 and 2.4 mm. This spanned the entirety of PMd which is approximately 2.5 mm in depth from the pial surface to white matter (<xref ref-type="bibr" rid="bib5">Arikuni et al., 1988</xref>). However, making absolute statements about layers is difficult with these measurements because of errors in aligning superficial electrodes with layer I across different days. 
This could lead to shifts in estimates of absolute depth; up to 0.15 mm (the distance between the first and second electrode) of variability is induced in the alignment process (see Materials&#160;and&#160;methods). However, relative comparisons are likely better preserved. Thus, we use relative comparisons to describe laminar differences between distributions and in comparison to anatomical counts in fixed tissue in later sections.</p><p>Above each column of <xref ref-type="fig" rid="fig8">Figure 8A and B</xref> are the laminar distributions for all waveforms in the associated set of clusters (in gray); below these are the laminar distributions for each cluster set&#8217;s constituent clusters. On the right (<xref ref-type="fig" rid="fig8">Figure 8C</xref>), we show the distribution of all waveforms collected at top in gray with each GMM cluster&#8217;s distribution shown individually below.</p><fig-group><fig id="fig8" position="float"><label>Figure 8.</label><caption><title>Laminar distribution of <italic>WaveMAP</italic> waveform clusters.</title><p>(<bold>A, B</bold>) The overall histograms for the broad- and narrow-spiking waveform clusters are shown at top across cortical depths on the left and right respectively (in gray); below are shown histograms for their constituent <italic>WaveMAP</italic> clusters.&#160;These waveforms are shown sorted by the cortical depth at which they were recorded (from 0.0 mm [presumptive pial surface] to 2.4 mm in 0.15 mm increments). Broad-spiking clusters were generally centered around middle layers and were less distinct in their differences in laminar distribution. Narrow-spiking clusters are shown on the right and were varied in their distribution with almost every cluster significantly varying in laminar distribution from every other. (<bold>C</bold>) Depth histograms for all waveforms collected (top, in gray) and every GMM cluster (below). 
dotted&#160;line&#160;p &lt; 0.05; dashed&#160;line&#160;p &lt; 0.01; solid&#160;line&#160;p &lt; 0.005; two-sample Kolmogorov-Smirnov Test, FDR adjusted. <xref ref-type="fig" rid="fig8s1">Figure 8&#8212;figure supplement 1</xref>: Composite figure showing each <italic>WaveMAP</italic> cluster with waveform, physiological, functional, and laminar distribution properties.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig8.jpg"/></fig><fig id="fig8s1" position="float" specific-use="child-fig"><label>Figure 8&#8212;figure supplement 1.</label><caption><title>Detailed summary of each UMAP cluster and features.</title><p>(<bold>A, B</bold>) A detailed summary of broad- (<bold>A</bold>) and narrow-spiking (<bold>B</bold>) cluster waveform shapes, physiological measures, and laminar distribution.&#160;Each waveform shape is shown at left with the average waveform shown as a black trace. The average post-hyperpolarization peak position is shown with a black line. The three waveform features used in the GMM classification (<xref ref-type="fig" rid="fig4">Figure 4A</xref>) are shown in the middle as the mean &#177; S.E. The baseline and max FR for each cluster are subsequently shown in spikes/s (median &#177; bootstrap S.E.). Functional properties, discrimination time and coherence slope, are shown in milliseconds and spikes/s/s/% coherence (both shown in median &#177; bootstrap S.E.). Laminar distributions are also shown with each column in the histogram being the number of each waveform found at each channel location. Channels are spaced every 0.15 mm apart from 0.0 to 2.4 mm.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig8-figsupp1.jpg"/></fig></fig-group><p>The overall narrow- and broad-spiking populations did not differ significantly according to their distribution (p = 0.24, Kolmogorov-Smirnov test). 
The broad-spiking cluster set of neurons (&#9317; , &#9318; , and &#9319;) are generally thought to contain cortical excitatory pyramidal neurons enriched in middle to deep layers (<xref ref-type="bibr" rid="bib121">Nandy et al., 2017</xref>; <xref ref-type="bibr" rid="bib105">McCormick et al., 1985</xref>). Consistent with this view, we found these broad-spiking clusters (<xref ref-type="fig" rid="fig8">Figure 8A</xref>) were generally centered around middle to deep layers with broad distributions and were not significantly distinguishable in laminarity (all comparisons p &gt; 0.05, two-sample Kolmogorov-Smirnov test, FDR adjusted).</p><p>In contrast, narrow-spiking clusters (<xref ref-type="fig" rid="fig8">Figure 8B</xref>) were distinctly varied in their distribution such that almost every cluster had a unique laminar distribution. Cluster &#9312; contained a broad distribution. It was significantly different in laminar distribution from clusters &#9313; and &#9315; (p = 0.002 and p = 0.013, respectively, two-sample Kolmogorov-Smirnov, FDR adjusted).</p><p>Cluster &#9313; showed a strongly localized concentration of neurons at a depth of 1.1 &#177; 0.33 mm (mean &#177; S.D.). It was significantly different from almost all other narrow-spiking clusters (p = 0.002, p = 1e-5, p = 0.010 for &#9312;, &#9315;, and &#9316; respectively; two-sample Kolmogorov-Smirnov test, FDR adjusted). Similarly, cluster &#9314; also showed a strongly localized laminar distribution but was situated more superficially than &#9313; with a heavier tail (1.0 &#177; 0.6 mm, mean &#177; S.D.).</p><p>Cluster &#9315; was uniquely deep in its cortical distribution (1.70 &#177; 0.44, mean &#177; S.D.). These neurons had a strongly triphasic waveform shape characterized by a large pre-hyperpolarization peak. 
These waveforms have been implicated as arising from myelinated excitatory pyramidal cells (<xref ref-type="bibr" rid="bib12">Barry, 2015</xref>), which are especially dense in this caudal region of PMd (<xref ref-type="bibr" rid="bib11">Barbas and Pandya, 1987</xref>).</p><p>The last cluster, &#9316;, like &#9312; was characterized by a broad distribution across cortical depths unique among narrow-spiking neurons and was centered around a depth of 1.3 &#177; 0.65 mm (mean &#177; S.D.) and present in all layers (<xref ref-type="bibr" rid="bib5">Arikuni et al., 1988</xref>).</p><p>Such laminar differences were not observed when we used GMM clustering. Laminar distributions for BS, BST, NS, and NST did not significantly differ from each other (<xref ref-type="fig" rid="fig8">Figure 8C</xref>; BS vs. BST had p = 0.067, all other relationships p &gt; 0.2; two-sample Kolmogorov-Smirnov test, FDR adjusted). Each GMM cluster also exhibited broad distributions across cortex which is at odds with our understanding of cell types using histology (discussed in the next section).</p></sec><sec id="s2-9"><title>Some narrow-spiking <italic>WaveMAP</italic> cluster laminar distributions align with inhibitory subtypes</title><p>We have shown that <italic>WaveMAP</italic> clusters have more distinct laminarity than GMM clusters. If <italic>WaveMAP</italic> clusters are consistent with cell type, we should expect their distributions to be relatively consistent with distributions from certain anatomical types visualized via immunohistochemistry (IHC). An especially well-studied set of non-overlapping anatomical inhibitory neuron types in the monkey are parvalbumin-, calretinin-, and calbindin-positive GABAergic interneurons (PV<sup>+</sup>, CR<sup>+</sup>, and CB<sup>+</sup> respectively) (<xref ref-type="bibr" rid="bib39">DeFelipe, 1997</xref>). Using IHC, we examined tissue from macaque rostral PMd stained for each of these three interneuron types. 
We then conducted stereological counting of each type averaged across six exemplars to quantify cell type distribution across cortical layers (see <xref ref-type="fig" rid="fig9">Figure 9A and B</xref>, <xref ref-type="bibr" rid="bib146">Schmitz et al., 2014</xref>) and compared it to the distributions in <xref ref-type="fig" rid="fig8">Figure 8</xref>.</p><fig id="fig9" position="float"><label>Figure 9.</label><caption><title>Anatomical labeling of three inhibitory interneuron types in PMd.</title><p>(<bold>A</bold>) Sample maximum intensity projection of immunohistological (IHC) staining of rostral PMd calbindin-positive (CB<sup>+</sup>) interneurons in blue. Note the many weakly-positive excitatory pyramidal neurons (arrows) in contrast to the strongly-positive interneurons (arrowheads). Only the interneurons were considered in stereological counting. In addition, only around the first 1.5 mm of tissue is shown (top of layer V) but the full tissue area was counted down to 2.4 mm (approximately the top of white matter). Layer IV exists as a thin layer in this area. Layer divisions were estimated based on depth and referencing <xref ref-type="bibr" rid="bib5">Arikuni et al., 1988</xref> (<xref ref-type="bibr" rid="bib5">Arikuni et al., 1988</xref>). (<bold>B</bold>) Sample maximum intensity projection of IHC staining of PMd calretinin-positive (CR<sup>+</sup>) and parvalbumin-positive (PV<sup>+</sup>) interneurons in yellow and fuchsia respectively. The same depth of tissue and layer delineations were used as in (<bold>A</bold>). (<bold>C, D, E</bold>) Stereological manual counts (<xref ref-type="bibr" rid="bib146">Schmitz et al., 2014</xref>) (mean &#177; S.D.) of CB<sup>+</sup>, CR<sup>+</sup>, PV<sup>+</sup> cells in PMd, respectively. Counts were collected from six specimens, each with all three IHC stains, and with counts normalized to each sample. 
Source files for this figure are available on Dryad (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5061/dryad.z612jm6cf">https://doi.org/10.5061/dryad.z612jm6cf</ext-link>).</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/fig9.jpg"/></fig><p>Both CB<sup>+</sup> and CR<sup>+</sup> cells (<xref ref-type="fig" rid="fig9">Figure 9C and D</xref>, respectively) exhibited a similarly restricted superficial distribution most closely resembling &#9314;. In addition, CR<sup>+</sup> and CB<sup>+</sup> cells are known to have very similar physiological properties and spike shape (<xref ref-type="bibr" rid="bib181">Zaitsev et al., 2005</xref>). An alternative possibility is that one of CR<sup>+</sup> or CB<sup>+</sup> might correspond to &#9313; and the other to &#9314; but this is less likely given their nearly identical histological distributions (<xref ref-type="fig" rid="fig9">Figure 9C and D</xref>) and similar physiology (<xref ref-type="bibr" rid="bib181">Zaitsev et al., 2005</xref>).</p><p>In contrast, <italic>WaveMAP</italic> cluster &#9312;, had laminar properties consistent with PV<sup>+</sup> neurons (<xref ref-type="fig" rid="fig9">Figure 9B</xref>): both were concentrated superficially but proliferated into middle layers (<xref ref-type="fig" rid="fig9">Figure 9E</xref>). In addition, there were striking physiological and functional similarities between &#9312; and PV<sup>+</sup> cells. In particular, both &#9312; and PV<sup>+</sup> cells have low baseline FR, early responses to stimuli and robust modulation of FR similar to PV<sup>+</sup> cells in mouse M1 (<xref ref-type="bibr" rid="bib51">Estebanez et al., 2017</xref>). 
Cluster &#9316; also had similar properties to &#9312; and could also correspond to PV<sup>+</sup> cells.</p><p>Together, these results from IHC suggest that the narrow-spiking clusters identified from <italic>WaveMAP</italic> potentially map on to different inhibitory types.</p></sec><sec id="s2-10"><title>Heterogeneity in decision-related activity emerges from both cell type and layer</title><p>Our final analysis examines whether these <italic>WaveMAP</italic> clusters can explain some of the heterogeneity observed in decision-making responses in PMd over and above previous methods (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). Heterogeneity in decision-related activity can emerge from cortical depth, different cell types within each layer, or both. To quantify the relative contributions of <italic>WaveMAP</italic> clusters and cortical depth, we regressed discrimination time on both separately and together and examined the change in variance explained (adjusted <inline-formula><mml:math id="inf6"><mml:msup><mml:mi>R</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:math></inline-formula>). We then compared this against the GMM clusters with cortical depth to show that <italic>WaveMAP</italic> better explains the heterogeneity of decision-related responses.</p><p>We previously showed that some of the variability in decision-related responses is explained by the layer from which the neurons are recorded (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). Consistent with previous work, we found that cortical depth explains some variability in discrimination time (1.7%). We next examined if the <italic>WaveMAP</italic> clusters identified also explained variability in discrimination time: a categorical regression between <italic>WaveMAP</italic> clusters and discrimination time, explained a much larger 6.6% of variance. 
Including both cortical depth and cluster identity in the regression explained 7.3% of variance in discrimination time.</p><p>In contrast, we found that GMM clusters regressed against discrimination time only explained 3.3% of variance and the inclusion of both GMM cluster and cortical depth only explained 4.6% of variance.</p><p>Thus, we find that <italic>WaveMAP</italic> clustering explains a much larger variance relative to cortical depth alone. This demonstrates that <italic>WaveMAP</italic> clusters come closer to cell types than previous efforts and are not artifacts of layer-dependent decision-related inputs. That is, both the cortical layer in which a cell type is found as well as <italic>WaveMAP</italic> cluster membership contribute to the variability in decision-related responses. Furthermore, <italic>WaveMAP</italic> clusters outperform GMM clusters as regressors of a functional property associated with cell types. These results further highlight the power of <italic>WaveMAP</italic> to separate out putative cell types and help us better understand decision-making circuits.</p></sec></sec><sec id="s3" sec-type="discussion"><title>Discussion</title><p>Our goal in this study was to further understand the relationship between waveform shape and the physiology, function, and laminar distribution of cell populations in dorsal premotor cortex during perceptual decision-making. Our approach was to develop a new method, <italic>WaveMAP</italic>, that combines a recently developed non-linear dimensionality reduction technique (UMAP) with graph clustering (Louvain community detection) to uncover hidden diversity in extracellular waveforms. We found this approach not only replicated previous studies by distinguishing between narrow- and broad-spiking neurons, but did so in a way that (1) revealed additional diversity, and (2) obviated the need to examine particular waveform features. 
In this way, our results demonstrate how traditional feature-based methods obscure biological detail that is more faithfully revealed by our <italic>WaveMAP</italic> method. Furthermore, through interpretable machine learning, we show our approach not only leverages many of the features already established as important in the literature but expands upon them in a more nuanced manner&#8212;all with minimal supervision or stipulation of priors. Finally, we show that the candidate cell classes identified by <italic>WaveMAP</italic> have distinct physiological properties, decision-related dynamics, and laminar distribution. The properties of each <italic>WaveMAP</italic> cluster are summarized in <xref ref-type="fig" rid="fig8s1">Figure 8&#8212;figure supplement 1A and B</xref> for broad- and narrow-spiking clusters, respectively.</p><p><italic>WaveMAP</italic> combines UMAP with high-dimensional graph clustering and interpretable machine learning to better identify candidate cell classes. Our approach might also be useful in other domains that employ non-linear dimensionality reduction such as computational ethology (<xref ref-type="bibr" rid="bib2">Ali et al., 2019</xref>; <xref ref-type="bibr" rid="bib67">Hsu and Yttri, 2020</xref>; <xref ref-type="bibr" rid="bib9">Bala et al., 2020</xref>), analysis of multi-scale population structure (<xref ref-type="bibr" rid="bib42">Diaz-Papkovich et al., 2019</xref>), and metascientific analyses of the literature (<xref ref-type="bibr" rid="bib124">Noichl, 2021</xref>). We also note that while traditional uses of non-linear dimensionality reduction and UMAP has been to data lacking autoregressive properties, such as transcriptomic expression (<xref ref-type="bibr" rid="bib16">Becht et al., 2019</xref>), this does&#160;not seem to be an issue for <italic>WaveMAP</italic>. Even though our waveforms have temporal autocorrelation, our method still is able to pick out interesting structure. 
Other work has found similar success in analyzing time series data with non-linear dimensionality reduction (<xref ref-type="bibr" rid="bib149">Sedaghat-Nejad et al., 2021</xref>; <xref ref-type="bibr" rid="bib43">Dimitriadis et al., 2018</xref>; <xref ref-type="bibr" rid="bib71">Jia et al., 2019</xref>; <xref ref-type="bibr" rid="bib60">Gouwens et al., 2020</xref>; <xref ref-type="bibr" rid="bib2">Ali et al., 2019</xref>).</p><sec id="s3-1"><title>Advantages of <italic>WaveMAP</italic> over traditional methods</title><p>At the core of <italic>WaveMAP</italic> is UMAP which has some advantages over other non-linear dimensionality reduction methods that have been applied in this context. Although most algorithms offer fast implementations that scale well to large input dimensionalities and volumes of data (<xref ref-type="bibr" rid="bib94">Linderman et al., 2019</xref>; <xref ref-type="bibr" rid="bib125">Nolet et al., 2020</xref>), UMAP also projects efficiently into arbitrary <italic>output</italic> dimensionalities while also returning an invertible transform. That is, we can efficiently project new data into any arbitrary dimensional projected space without having to recompute the mapping.</p><p>These properties provide three advantages over other non-linear dimensionality reduction approaches: First, our method is stable in the sense that it produces a consistent number of clusters and each cluster has the same members across random subsamples (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>). Clustering in the high-dimensional space rather than the projected space lends stability to our approach. Second, it allows exploration of any region of the projected space no matter the intuited latent dimensionality&#8212;this yields an intuitive understanding of how UMAP non-linearly transforms the data, which might be related to underlying biological phenomena. 
Thus, UMAP allows <italic>WaveMAP</italic> to go beyond a &#8216;discriminative model&#8217; typical of other clustering techniques and function as a &#8216;generative model&#8217; with which to make predictions. Third, it enables cross-validation of a classifier trained on cluster labels, impossible with methods that don&#8217;t return an invertible transform. To cross-validate unsupervised methods, unprocessed test data must be passed into a transform computed <italic>only</italic> on training data and evaluated with some loss function (<xref ref-type="bibr" rid="bib117">Moscovich and Rosset, 2019</xref>). This is only possible if an invertible transform is admitted by the method of dimensionality reduction as in UMAP.</p><p>A final advantage of UMAP is that it inherently allows for not just unsupervised but supervised and semi-supervised learning whereas some other methods do not (<xref ref-type="bibr" rid="bib143">Sainburg et al., 2020</xref>). This key difference enables &#8216;transductive inference&#8217; which is making predictions on unlabeled test points based upon information gleaned from labeled training points. This opens up a diverse number of novel applications in neuroscience through informing the manifold learning process with biological ground truths (in what is called &#8216;metric learning&#8217;) (<xref ref-type="bibr" rid="bib18">Bellet et al., 2013</xref>; <xref ref-type="bibr" rid="bib180">Yang and Jin, 2006</xref>). Experimentalists could theoretically pass biological ground truths to <italic>WaveMAP</italic> as training labels and &#8216;teach&#8217; <italic>WaveMAP</italic> to produce a manifold that more closely hews to true underlying diversity. 
For instance, if experimentalists &#8216;opto-tag&#8217; neurons of a particular cell type (<xref ref-type="bibr" rid="bib142">Roux et al., 2014</xref>; <xref ref-type="bibr" rid="bib41">Deubner et al., 2019</xref>; <xref ref-type="bibr" rid="bib71">Jia et al., 2019</xref>; <xref ref-type="bibr" rid="bib30">Cohen et al., 2012</xref>; <xref ref-type="bibr" rid="bib62">Hangya et al., 2015</xref>), this information can be passed along with the extracellular waveform to <italic>WaveMAP</italic> which would, in a semi-supervised manner, learn manifolds better aligned to biological truth.</p><p>A learned manifold could also be useful in future experiments to identify cell types in real-time without opto-tagging. This could be done by projecting the averaged waveforms found within an experiment into the learned <italic>WaveMAP</italic> manifold. This method would be especially useful in a scenario in which the number of electrodes exceeds the number of channels available to record from simultaneously and not all cell types are of equal interest to record (e.g. Neuropixels probes which have 960 electrodes but simultaneously record from only 384; <xref ref-type="bibr" rid="bib171">Trautmann et al., 2019</xref>; <xref ref-type="bibr" rid="bib73">Jun et al., 2017</xref>). We believe this is a rich area that can be explored in future work.</p><p><italic>WaveMAP</italic> uses a fully&#160;unsupervised method for separating and clustering waveform classes associated with distinct laminar distributions and functional properties in a decision-making task. One concern with fully unsupervised methods is that the features used for separation are unclear. However, by applying interpretable machine learning (<xref ref-type="bibr" rid="bib150">Shapley, 1988</xref>; <xref ref-type="bibr" rid="bib98">Lundberg and Lee, 2017</xref>), we showed that our unsupervised methods utilized many of the same waveform features derived by hand in previous work but did so in a single unifying framework. 
Our interpretable machine learning approach shows how each waveform feature delineates certain waveform clusters at the expense of others and&#8212;more importantly&#8212;shows how they can be optimally recombined to reveal the full diversity of waveform shapes.</p><p>Our novel approach of using non-linear dimensionality reduction with graph clustering on the population of extracellular action potentials compared to specified waveform features has parallels with the evolution of new approaches for the analysis of neuronal firing rates in relevant brain areas (<xref ref-type="bibr" rid="bib151">Shenoy et al., 2013</xref>; <xref ref-type="bibr" rid="bib28">Churchland et al., 2012</xref>; <xref ref-type="bibr" rid="bib102">Mante et al., 2013</xref>; <xref ref-type="bibr" rid="bib137">Remington et al., 2018</xref>; <xref ref-type="bibr" rid="bib177">Wang et al., 2018</xref>). Classically, the approach to analyzing firing rates involved in cognition was to develop simple metrics that separated neurons recorded in relevant brain areas. For instance, tuning is used to separate neurons in the motor (<xref ref-type="bibr" rid="bib55">Georgopoulos et al., 1986</xref>) and visual cortex (<xref ref-type="bibr" rid="bib68">Hubel and Wiesel, 1959</xref>). Similarly, visuomotor indices that categorize neurons along a visual to motor continuum are used to understand firing rates during various tasks in the frontal eye fields (<xref ref-type="bibr" rid="bib23">Bruce and Goldberg, 1985</xref>) and premotor cortex (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). However, these specified features quash other aspects of a firing rate profile in favor of focusing on only a few other aspects. 
New approaches to analyze firing rates use dimensionality reduction techniques such as principal component analysis (<xref ref-type="bibr" rid="bib151">Shenoy et al., 2013</xref>; <xref ref-type="bibr" rid="bib28">Churchland et al., 2012</xref>; <xref ref-type="bibr" rid="bib37">Cunningham and Yu, 2014</xref>), tensor component analysis (<xref ref-type="bibr" rid="bib179">Williams et al., 2018</xref>), demixed principal component analysis (<xref ref-type="bibr" rid="bib84">Kobak et al., 2016</xref>), targeted dimensionality reduction (<xref ref-type="bibr" rid="bib102">Mante et al., 2013</xref>), and autoencoder neural networks (<xref ref-type="bibr" rid="bib128">Pandarinath et al., 2018</xref>). These methods have provided insight into heterogeneous neural activity patterns in many brain areas without the need for specified features like tuning or a visuomotor index. Our study strongly suggests that non-linear dimensionality reduction methods applied to the entire extracellular waveform are better than using hand-derived waveform features such as trough to peak duration, repolarization time, spike width and other metrics. This progression from user-defined features to data-driven methods follows similar trends in the field of machine learning.</p></sec><sec id="s3-2"><title>Waveform cluster shapes are unlikely to arise from electrode placement</title><p>It is a possibility that the diversity of waveforms we observe is just an artifact of electrode placement relative to the site of discharge. This supposes that waveform shape changes with respect to the distance between the neuron and the electrode. This is unlikely because both in vitro studies (<xref ref-type="bibr" rid="bib40">Deligkaris et al., 2016</xref>) and computational simulations (<xref ref-type="bibr" rid="bib56">Gold et al., 2006</xref>) show distance from the soma mostly induces changes in amplitude. 
There is a small widening in waveform width but this occurs at distances in which the amplitude has attenuated below even very low spike thresholds (<xref ref-type="bibr" rid="bib56">Gold et al., 2006</xref>). We controlled for this cell-type-irrelevant variation in amplitude by normalizing spike troughs/peaks during preprocessing to be between &#8722;1 and +1. It should also be noted that without any normalization, all structure was lost in the UMAP projection which instead yielded one large point cloud (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2E</xref>). Intuitively, this can be understood as UMAP allocating most of the projected space to explaining amplitude differences rather than shape variation. This can be visualized by coloring each point by the log of the amplitude of each spike (log of difference in maximum vs. minimum values) and observing that it forms a smooth gradient in the projected space (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2F</xref>).</p><p>It is possible that differences that we observe in waveform shape could be due to recording from different morphological structures (dendrites, soma, or axons) rather than different cell types. However, we believe that most of our waveforms are from the soma. While it is true that there are some cell structures associated with different waveform shapes (such as triphasic waveforms near neurites, especially axons <xref ref-type="bibr" rid="bib12">Barry, 2015</xref>; <xref ref-type="bibr" rid="bib40">Deligkaris et al., 2016</xref>; <xref ref-type="bibr" rid="bib139">Robbins et al., 2013</xref>; <xref ref-type="bibr" rid="bib161">Sun et al., 2021</xref>), highly&#160;controlled in vitro studies show that a large majority of EAP&#8217;s are from somata (86%) (<xref ref-type="bibr" rid="bib40">Deligkaris et al., 2016</xref>). 
In concordance with these results, we only observed one cluster (&#9315;, 6% of all EAP&#8217;s) with a triphasic shape and these waveforms were only found in deep layers where myelination is prevalent. Thus, we believe that almost all of our waveforms come from somata, with the possible exclusion of &#9315;. Finally, we observed distinct physiological properties (<xref ref-type="fig" rid="fig6">Figure 6</xref>), decision-related dynamics (<xref ref-type="fig" rid="fig7">Figure 7</xref>), and laminar distribution (<xref ref-type="fig" rid="fig8">Figure 8</xref>) for each <italic>WaveMAP</italic> cluster. This would not be the case if the waveforms were just obtained from different compartments of the same neurons.</p><p>Given that electrode location has little effect on waveform shape, we might then ask what about a neuron&#8217;s waveform shape, in terms of cellular physiology, is captured by <italic>WaveMAP</italic>? We propose that the space found by UMAP-1 and UMAP-2 sensibly covaries according to documented properties of K<sup>+</sup> ion channel dynamics. As UMAP-1 increases, we observe a smooth transition of the inflection of the repolarization slope from negative to positive (slow to fast repolarization rate; <xref ref-type="fig" rid="fig5">Figure 5A</xref>). Said differently, the post-hyperpolarization peak becomes sharper as we increase in the UMAP-1 direction. These observations are consistent with the same gradual change in intracellular AP repolarization slope facilitated by the kinetics of the fast voltage-gated Kv3 potassium-channel in an activity-dependent manner (<xref ref-type="bibr" rid="bib74">Kaczmarek and Zhang, 2017</xref>). These channels are necessary for sustained high-frequency firing (<xref ref-type="bibr" rid="bib45">Ding et al., 2011</xref>). In the UMAP-2 direction, there is a smooth decrease in the width of the post-hyperpolarization peak and this direction roughly traverses from broad- to narrow-spiking to triphasic waveforms. 
This gradual change too has been noted as being associated with the kinetics of the Kv3 potassium-channel: blocking this channel in a dose-dependent manner with tetraethylammonium induces a gradual widening of post-hyperpolarization peak width (<xref ref-type="bibr" rid="bib50">Erisir et al., 1999</xref>; <xref ref-type="bibr" rid="bib15">Bean, 2007</xref>). Both of these changes in intracellular waveform shape likely have a strong effect on the shape of extracellular waveforms (<xref ref-type="bibr" rid="bib65">Henze et al., 2000</xref>).</p></sec><sec id="s3-3"><title>Reliance on waveform features might obscure cell type diversity</title><p>Our results show a greater proportion of narrow- (putatively inhibitory) vs. broad-spiking (putatively excitatory) neurons (69% vs. 31%, respectively); this is inconsistent with anatomical studies (<xref ref-type="bibr" rid="bib48">Dombrowski et al., 2001</xref>; <xref ref-type="bibr" rid="bib182">Zaitsev et al., 2009</xref>; <xref ref-type="bibr" rid="bib134">Povysheva et al., 2013</xref>). These studies demonstrate, through direct labeling of cell type, that in the macaque cortex, 65&#8211;80% of neurons are excitatory while 20&#8211;35% are inhibitory. We are not the only study to report this puzzling result: Onorato and colleagues (<xref ref-type="bibr" rid="bib127">Onorato et al., 2020</xref>) also report greater numbers of narrow-spiking compared to broad-spiking neurons in monkey V1. Thus, care must be taken when attempting links between spike waveform shape and cell type (<xref ref-type="bibr" rid="bib93">Lemon et al., 2021</xref>). A resolution to this discrepancy is to rethink equating narrow-spiking to inhibitory cells and broad-spiking to excitatory cells. 
Anatomical studies show that a substantial number of excitatory neurons in the monkey motor and visual cortices express the Kv3.1b potassium channel which is known to confer neurons with the ability to produce action potentials of narrow spike width and high firing rate (<xref ref-type="bibr" rid="bib33">Constantinople et al., 2009</xref>; <xref ref-type="bibr" rid="bib79">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="bib80">Kelly and Hawken, 2020</xref>; <xref ref-type="bibr" rid="bib70">Ichinohe et al., 2004</xref>; <xref ref-type="bibr" rid="bib93">Lemon et al., 2021</xref>). Furthermore, researchers have used antidromic stimulation to show that narrow-spiking neurons can be excitatory in motor and premotor cortex (<xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>; <xref ref-type="bibr" rid="bib93">Lemon et al., 2021</xref>).</p><p>We therefore believe prior studies have underexplored the diversity of classes accessed by their physiological recordings. Evidence of this is that histograms of peak width (and other specified features) across literature are often not cleanly bimodal (<xref ref-type="bibr" rid="bib87">Krimer et al., 2005</xref>; <xref ref-type="bibr" rid="bib183">Zhu et al., 2020</xref>) especially in premotor cortices (<xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>). In addition, the relative proportions of narrow vs. broad are often dependent on the cutoff chosen, which varies widely across studies (<xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>; <xref ref-type="bibr" rid="bib112">Merchant et al., 2012</xref>). 
Analyses like ours which look at entire waveforms&#8212;rather than a few specified features&#8212;extract this diversity from extracellular recordings whereas specified features mix waveform classes.</p></sec><sec id="s3-4"><title>Better parcellation of waveform variability leads to biological insight</title><p>We find that many narrow-spiking subtypes in PMd signal choice earlier than broad-spiking neurons in our decision-making task (<xref ref-type="fig" rid="fig7">Figure 7F</xref>). These observations are consistent with another study of PMd in monkeys in reach target selection and movement production (<xref ref-type="bibr" rid="bib155">Song and McPeek, 2010</xref>). In this study, narrow-spiking neurons signaled the selected target 25 ms earlier than broad-spiking neurons. Our results are also consistent with other studies of narrow- vs. broad-spiking neurons in the frontal eye fields (FEF) (<xref ref-type="bibr" rid="bib46">Ding and Gold, 2012</xref>) and inferior temporal area (IT) (<xref ref-type="bibr" rid="bib120">Mruczek and Sheinberg, 2012</xref>) during decision-making. In these studies, narrow-spiking neurons had higher firing rates before movement onset compared to broad-spiking neurons&#8212;a result consistent with our observations for some &#8216;narrow-spiking&#8217; PMd neurons. Our analyses recapitulate these results and provide additional insights into how different narrow-spiking cell types correlate with decisions. We reproduce the result that narrow-spiking cells, as a whole, have a faster discrimination time than broad-spiking cells but in addition we show that certain narrow-spiking cells respond as slowly as broad-spiking cells (&#9313; and &#9315;; <xref ref-type="fig" rid="fig7">Figure 7F</xref>). This lends further evidence to our theory that &#9313; and &#9315; are likely narrow-spiking excitatory cells. 
In contrast, &#9314; and &#9312; while both narrow-spiking, had distributions that more aligned with histologically-verified inhibitory types. In addition, &#9314; and &#9312; had physiological properties more in line with inhibitory cell types.</p><p><italic>WaveMAP</italic> suggests that narrow-spiking waveforms encompass many cell classes with distinct shape and laminar distribution. One of our narrow-spiking clusters (cluster &#9313;) was restricted to more superficial layers (<xref ref-type="fig" rid="fig8">Figure 8B</xref>) and had certain functional properties&#8212;low baseline firing rate and longer discrimination times&#8212;which are thought to be more closely aligned to properties of excitatory neurons (<xref ref-type="bibr" rid="bib155">Song and McPeek, 2010</xref>). Another narrow-spiking cluster, &#9315;, exhibited physiological and functional properties similar to &#9313; (all comparisons not significant in <xref ref-type="fig" rid="fig6">Figure 6C,D and E</xref> or <xref ref-type="fig" rid="fig7">Figure 7E and F</xref>) but with a distinct laminar distribution (<xref ref-type="fig" rid="fig8">Figure 8B</xref>) and highly triphasic waveform shape (<xref ref-type="fig" rid="fig8s1">Figure 8&#8212;figure supplement 1B</xref>). In contrast to &#9313;, which was concentrated in layer III, &#9315; was restricted to deep layers. 
These tri-phasic neurons could either be large corticospinal excitatory pyramidal cells (<xref ref-type="bibr" rid="bib70">Ichinohe et al., 2004</xref>; <xref ref-type="bibr" rid="bib154">Soares et al., 2017</xref>; <xref ref-type="bibr" rid="bib174">Vigneswaran et al., 2011</xref>), or axons (<xref ref-type="bibr" rid="bib12">Barry, 2015</xref>; <xref ref-type="bibr" rid="bib139">Robbins et al., 2013</xref>; <xref ref-type="bibr" rid="bib161">Sun et al., 2021</xref>).</p></sec><sec id="s3-5"><title>High-density probes and optogenetics can provide better insight into cell classes in the primate</title><p>Our recordings here were performed with 16 channel U-probes which provided reasonable estimates of laminar organization for these different putative cell classes. Use of high-density electrophysiological methods providing higher electrode counts perpendicular to the cortical surface would provide further insight into the laminar organization of different cell types (<xref ref-type="bibr" rid="bib73">Jun et al., 2017</xref>; <xref ref-type="bibr" rid="bib44">Dimitriadis et al., 2020</xref>). High-density recordings would allow us to perform <italic>WaveMAP</italic> in an additional dimension (across multiple electrodes) to increase confidence in identified cell classes (<xref ref-type="bibr" rid="bib118">Mosher et al., 2020</xref>) and localization of signal to somata (<xref ref-type="bibr" rid="bib71">Jia et al., 2019</xref>). Sensitive electrodes providing spatial access to neural activity (<xref ref-type="bibr" rid="bib73">Jun et al., 2017</xref>) can also improve our understanding of how these cell classes are organized both parallel and perpendicular to cortical surface (<xref ref-type="bibr" rid="bib144">Saleh et al., 2019</xref>; <xref ref-type="bibr" rid="bib118">Mosher et al., 2020</xref>) and across areas (<xref ref-type="bibr" rid="bib44">Dimitriadis et al., 2020</xref>). 
Access to cell types with high-density recordings would also allow for the identification of &#8216;me-types&#8217; through electromorphology (<xref ref-type="bibr" rid="bib60">Gouwens et al., 2020</xref>; <xref ref-type="bibr" rid="bib162">Tasic et al., 2018</xref>). This information could also help inform detailed models of cortical circuits that incorporate cell type information (<xref ref-type="bibr" rid="bib59">Gouwens et al., 2018</xref>; <xref ref-type="bibr" rid="bib21">Billeh et al., 2020</xref>; <xref ref-type="bibr" rid="bib136">Reimann et al., 2013</xref>).</p><p>Another powerful tool that has been leveraged in the study of cell types during behavior is optogenetics (<xref ref-type="bibr" rid="bib130">Pinto and Dan, 2015</xref>; <xref ref-type="bibr" rid="bib95">Lui et al., 2021</xref>; <xref ref-type="bibr" rid="bib88">Kvitsiani et al., 2013</xref>). Although in its infancy relative to its use in the mouse, optogenetics in monkeys offers direct interrogation of cell types. Future studies will allow us to more precisely link putative cell classes in vivo to function (<xref ref-type="bibr" rid="bib35">Courtin et al., 2014</xref>). NHP optogenetics is slowly advancing and efforts in many research groups around the world are producing new methods for in vivo optogenetics (<xref ref-type="bibr" rid="bib172">Tremblay et al., 2020</xref>). We expect future experiments using the promising new mDlx (<xref ref-type="bibr" rid="bib38">De et al., 2020</xref>) and h56d (<xref ref-type="bibr" rid="bib109">Mehta et al., 2019</xref>) promoter sequences to selectively opto-tag inhibitory neurons or PV<sup>+</sup> neurons directly (<xref ref-type="bibr" rid="bib176">Vormstein-Schneider et al., 2020</xref>) will greatly benefit validation of these derived cell classes. 
Finally, <italic>WaveMAP</italic>&#8217;s ability to find clusters of putative biological relevance using waveform shape alone encourages its application in settings where ground truth evaluation is particularly difficult to obtain such as in the human brain (<xref ref-type="bibr" rid="bib129">Paulk et al., 2021</xref>).</p></sec></sec><sec id="s4" sec-type="materials|methods"><title>Materials and methods</title><table-wrap id="keyresource" position="anchor"><label>Key resources table</label><table frame="hsides" rules="groups"><thead><tr><th>Reagent type (species) or resource</th><th>Designation</th><th>Source or reference</th><th>Identifiers</th><th>Additional <break/>information</th></tr></thead><tbody><tr><td>Primary antibody</td><td>Rabbit anti-calbindin D-28k (polyclonal)</td><td>Swant</td><td>Cat#: CB38 RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/AB_10000340">AB_10000340</ext-link></td><td>1:2000 dilution</td></tr><tr><td>Primary antibody</td><td>Rabbit anti-calretinin D-28k (polyclonal)</td><td>Swant</td><td>Cat#: 7697 RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/AB_2619710">AB_2619710</ext-link></td><td>1:2000 dilution</td></tr><tr><td>Primary antibody</td><td>Guinea pig anti-parvalbumin (polyclonal)</td><td>Swant</td><td>Cat#: GP72 RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/AB_2665495">AB_2665495</ext-link></td><td>1:2000 dilution</td></tr><tr><td>Secondary antibody</td><td>Donkey anti-rabbit Alexa 546</td><td>ThermoFisher</td><td>Cat#: A10040 RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/AB_2534016">AB_2534016</ext-link></td><td>1:200 dilution</td></tr><tr><td>Secondary antibody</td><td>Donkey anti-guinea pig Alexa 546</td><td>Jackson</td><td>Cat#: 706-545-148 RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/AB_2340472">AB_2340472</ext-link></td><td>1:200 
dilution</td></tr></tbody></table></table-wrap><sec id="s4-1"><title>Code and data availability</title><p>All figures and figure supplements can be generated from the code and data included with the manuscript and uploaded to Dryad/Zenodo (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_005910">SCR_005910</ext-link>/RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_004129">SCR_004129</ext-link>)&#160;(<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5061/dryad.z612jm6cf">https://doi.org/10.5061/dryad.z612jm6cf</ext-link>;&#160;<xref ref-type="bibr" rid="bib91">Lee et al., 2021</xref>) and on Github (<ext-link ext-link-type="uri" xlink:href="https://github.com/EricKenjiLee/WaveMAP_Paper">https://github.com/EricKenjiLee/WaveMAP_Paper</ext-link>). Pre-processing of raw averaged data was conducted in MATLAB (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_001622">SCR_001622</ext-link>) using the files located in Preprocessing.zip (see contained README.md). <xref ref-type="fig" rid="fig1">Figure 1</xref> was generated using MATLAB whereas all other figures were generated in Python (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_008394">SCR_008394</ext-link>) using the Jupyter/Google CoLab (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_018315">SCR_018315</ext-link>/RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_018009">SCR_018009</ext-link>) notebook available with this manuscript. Please see the Readme.md file included in the zip file WaveMAP_Paper.zip for instructions on how to generate all manuscript figures and supplementary figures. Raw confocal fluorescence images with associated CellCounter annotations are also available&#160;(<xref ref-type="bibr" rid="bib91">Lee et al., 2021</xref>). 
Further information about <italic>WaveMAP</italic> and updated notebooks can also be obtained from the Chandrasekaran lab website at Boston University (<ext-link ext-link-type="uri" xlink:href="http://www.chandlab.org">http://www.chandlab.org</ext-link>).</p></sec><sec id="s4-2"><title>Subjects and surgery</title><p>Our experiments were conducted using two adult male macaque monkeys (<italic>Macaca mulatta</italic>; monkey T, 7 years, 14 kg; O, 11 years, 15.5 kg) that were trained to reach to visual targets for a juice reward. Our monkeys were housed in a social vivarium with a normal day/night cycle. This study was performed in strict accordance with the recommendations in the Guide for the Care and Use of Laboratory Animals of the National Institutes of Health. All&#160;the procedures were approved by the Stanford Administrative Panel on Laboratory Animal Care (APLAC, Protocol Number 8856 entitled &#8216;Cortical Processing of Arm Movements&#8217;). Surgical procedures were performed under anesthesia, and every effort was made to minimize suffering. Appropriate analgesia, pain relief, and antibiotics were administered to the animals when needed after surgical approval.</p><p>After initial training to come out of the cage and sit comfortably in a chair, monkeys underwent sterile surgery for implantation of head restraint holders (Crist Instruments, cylindrical head holder) and standard recording cylinders (Crist Instruments, Hagerstown, MD). We placed our cylinders over caudal PMd (+16, 15 stereotaxic coordinates) and surface normal to the cortex. We covered the skull within the cylinder with a thin layer of dental acrylic/PALACOS bone cement.</p></sec><sec id="s4-3"><title>Apparatus</title><p>Monkeys sat in a customized chair (Crist Instruments, Snyder Chair) with their head restrained via the surgical implant. The arm not used for reaching was loosely restrained using a tube and a cloth sling. 
Experiments were controlled and data were collected under a custom computer control system (xPC target and Psychtoolbox-3 [RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_002881">SCR_002881</ext-link>] <xref ref-type="bibr" rid="bib81">Kleiner et al., 2007</xref>). Visual stimuli were displayed on an Acer HN2741 computer screen placed approximately 30 cm from the monkey and a photodetector (Thorlabs PD360A) was used to record the onset of the visual stimulus at a 1 ms resolution. Every session, we taped a small infrared reflective bead (11.5 mm, NDI Digital passive spheres) 1 cm from the tip of the middle digit of the right hand (left hand, monkey O). The position of this bead was tracked optically in the infrared (60 Hz, 0.35 mm root mean square accuracy; Polaris system; Northern Digital).</p><p>Eye position was tracked with an overhead infrared camera made by ISCAN along with associated software (estimated accuracy of 1&#176;, ISCAN, Burlington, MA). To get a stable eye image for the overhead infrared camera, an infrared dichroic mirror was positioned at a 45&#176; angle (facing upward) immediately in front of the nose. This mirror reflected the image of the eye in the infrared range while letting visible light pass through. A visor placed around the chair prevented the monkey from touching the infrared mirror, the juice tube, or bringing the bead to their mouth.</p></sec><sec id="s4-4"><title>Behavioral training</title><p>Our animals were trained using the following operant conditioning protocol. First, the animal was rewarded for arm movements toward the screen and learnt to take pieces of fruit on the screen. Once the animal acquired the association between reaching and reward, the animal was conditioned to reach and touch a target for a juice reward. The position, as well as the color of this target, was then randomized as the monkey learned to touch targets of various colors at different locations on the screen. 
We then used a design in which the monkey first held the central hold for a brief period, and then a checkerboard cue, which was nearly 100% red or 100% green, appeared for 400&#8211;600 ms and finally the two targets appeared. The monkey received a reward for making a reach to the color of the target that matched the predominant color of the checkerboard cue. Two-target &#8216;Decision&#8217; blocks were interleaved with single target blocks to reinforce the association between checkerboard color and the correct target. After two weeks of training with this interleaved paradigm, the animal reliably reached to the target matching the color of the central checkerboard cue. We switched the paradigm around by adopting a design in which the targets appeared before the checkerboard cue onset. We initially trained on holding periods (where the monkeys view targets) from 300 to 1800 ms. We trained the animal to maintain the hold on the center until the checkerboard cue appeared by providing small amounts of juice at rough time intervals. When the animal reliably avoided breaking central hold during the hold period, we stopped providing the small amounts of juice for holding but maintained the juice reward for correct reaches. After the animal learned to stay still during the target viewing period, we introduced more difficult checkerboard cues (decreased color coherences) to the animal while reducing the maximal holding period to 900 ms. We then trained the animal to discriminate the checkerboard as accurately and as fast as possible while discouraging impulsivity by adopting timeouts.</p></sec><sec id="s4-5"><title>Electrophysiological recordings</title><p>To guide the stereotaxic coordinates for our electrophysiological recordings, we used known response-to-muscle palpation properties of PMd and M1. Our chambers were placed normal to the surface of cortex and aligned with the skull of the monkey. Recordings were performed perpendicular to the surface of the brain. 
Recordings were made anterior to the central sulcus, lateral to the spur of the arcuate sulcus, and lateral to the precentral dimple. For both monkeys, we were able to identify the upper and lower arm representation by repeated palpation at a large number of sites to identify muscle groups associated with the sites. Recordings were performed in the PMd and M1 contralateral to the arm used by the monkey. Monkey T used his right arm (O used his left arm) to perform tasks.</p><p>A subset of the electrophysiological recordings were performed using traditional single electrode recording techniques. Briefly, we made small burr holes through the PALACOS/acrylic using handheld drills. We then used a Narishige drive with a blunt guide tube placed in firm contact with the dura. Recordings were obtained using FHC electrodes to penetrate the overlying dura (UEWLGCSEEN1E, 110 mm long and 250 &#181;m thick electrodes with a standard blunt tip and profile, epoxylite insulation, and an impedance of 5&#8211;7 M&#937;). Every effort was made to isolate single units during the recordings with FHC electrodes by online monitoring and seeking out well-isolated signals (see next section below).</p><p>We performed linear multi-contact electrode (U-probe) recordings in the same manner as single electrode recordings with some minor modifications. We used 180 &#181;m thick 16-electrode U-probes (15 &#181;m Pt/Ir electrode site diameter, 150 &#956;m spacing, circular shape, polyimide insulation, and secured in medical-grade epoxy. Electrode contacts were &#8764;100 k&#937; in impedance). We used a slightly sharpened guide tube to provide more purchase on dura. We also periodically scraped away, under ketamine-dexmedetomidine anesthesia, any overlying tissue on the dura. Both these modifications greatly facilitated penetration of the U-probe. We typically penetrated the brain at very slow rates (~2&#8211;5 &#956;m/s). 
Once we felt we had a reasonable sample population of neurons, potentially spanning different cortical layers, we stopped and waited for 45&#8211;60 min for the neuronal responses to stabilize. The experiments then progressed as usual.</p><p>We attempted to minimize the variability in U-probe placement on a session-by-session basis. Our approach was to place the U-probe so that the most superficial electrodes (electrodes 1, 2 on the 16 channel probe) were in layer I and able to record multi-unit spiking activity. Any further movement of the electrode upwards resulted in the disappearance of spiking activity and a change in the overall activity pattern of the electrode (suppression of overall LFP amplitudes). Similarly, driving the electrodes deeper resulted in multiphasic extracellular waveforms and also a change in auditory markers which were characterized by decreases in overall signal intensity and frequency content. Both markers suggested that the electrode entered white matter. Recording yields and electrode placement were in general much better in monkey T (average of ~16 units per session) than monkey O (average of ~9 units per session). We utilized these physiological markers as a guide to place electrodes and thus minimize variability in electrode placement on a session-by-session basis. Importantly, the variability in placement would act against our findings of depth-related differences shown in <xref ref-type="fig" rid="fig8">Figure 8</xref>.</p></sec><sec id="s4-6"><title>Identification of single neurons during recordings</title><p>Our procedure for identifying well-isolated single neurons was as follows: In the case of the single FHC tungsten electrode recordings, we moved the electrode and conservatively adjusted the threshold until we identified a well-demarcated set of waveforms. We took extra care to separate these waveforms from the noise and other smaller neurons. 
Our ability to isolate neurons was helped by the fact that these electrodes have a very small exposed area (hence high impedance) allowing for excellent isolation. Once a stable set of waveforms was identified, hoops from the Central software (Blackrock Microsystems) were used to demarcate the waveforms from noise and other potential single neurons. The electrode was allowed to settle for at least 15 min to ensure that the recording was stable. Once stability was confirmed, we began data collection. If we found that the recording was unstable, we discarded the neuron and moved the electrode to a newly isolated neuron and repeated the procedure. For a stable recording, we stored the waveform snippet and the time of the spike. Offline, we first visualized the waveforms in MATLAB by performing PCA. If we found that our online identification of the waveforms was inadequate, we either discarded the recording, identified it as a multi-unit (not used in this paper), or exported the data to Plexon Offline Sorter and redrew the cluster boundaries. We also took advantage of Plexon Offline Sorter&#8217;s ability to visualize how PCA features changed with time to ensure the quality and stability of our isolation. Finally, after redrawing cluster boundaries, we exported the data back to our analysis pipeline.</p><p>For our 16-channel Plexon U-Probe recordings, we again lowered the electrode until we found a stable set of waveforms. The small contact area of these electrode sites again ensured excellent identification and low levels of background noise (&#8764;10&#8211;20 &#181;V). We then waited at least 45 min until the recordings were very stable. Such an approach ensured that we minimized electrode drift. In our experience, the U-probes also have less drift than Neuropixel or other recording methods. We then again repeated the conservative thresholding and identification procedure outlined for the FHC electrodes. 
For U-probes, we did not move the electrodes once they had settled. Instead, we constantly monitored the recordings and any changes in a particular electrode over time led to the units from that electrode being discarded and not included in further analysis. Finally, the same offline procedures used for FHC electrodes were repeated for the U-probe recordings.</p></sec><sec id="s4-7"><title>Preprocessing of single-unit recordings</title><p>We obtained 996 extracellularly recorded single units (778 units recorded with the U-probe) from PMd across two monkeys (450 from Monkey O and 546 from Monkey T). Of these, we identified 801 units whose ISI violations (refractory period &#8804; 1.5 ms) &#8804; 1.5% (<xref ref-type="bibr" rid="bib25">Chandrasekaran et al., 2017</xref>). Our waveforms were filtered with a 4th-order 250 Hz high-pass Butterworth filter. The waveforms for each of the units were extracted for a duration of 1.6 ms with a pre-trough period of 0.4 ms, sampled at 30 kHz.</p></sec><sec id="s4-8"><title>Alignment and normalization of waveforms</title><p>In order to calculate the mean waveform for each single unit, we upsampled individual waveforms calculated over different trials by a factor of 10 and aligned them based on the method proposed in <xref ref-type="bibr" rid="bib78">Kaufman et al., 2013</xref>. For each waveform, we calculated its upswing slope (slope between trough to peak) and the downswing slope (slope to the trough) and re-aligned to the midpoint of the slope that exceeded the other by a factor of 1.5. Following this alignment, we chose the best set of waveforms for calculating the mean as those that satisfied the criteria (1) less the two standard deviations (S.D.) from the mean at each point and (2) average deviation from the mean across time was less than 0.4 (<xref ref-type="bibr" rid="bib78">Kaufman et al., 2013</xref>). The final set of waveforms for each unit was averaged and downsampled to 48 time points. 
Upon visual inspection, we then identified 761 units (625 single units with 490 U-probe recorded units) whose average waveforms qualified the criteria of exhibiting a minimum of two phases with trough occurring first. The remaining waveforms, unless stated otherwise here, were removed from the analysis. We excluded positive-spiking waveforms because of their association with axons (<xref ref-type="bibr" rid="bib161">Sun et al., 2021</xref>). Finally, we normalized the waveforms by dividing the extreme value of the amplitude such that the maximum deviation is &#177;1 unit (<xref ref-type="bibr" rid="bib153">Snyder et al., 2016</xref>).</p><p>It is important to note that the preprocessing we use, individual mean subtraction and &#177;1 unit normalization, operates independently of the data. Using another commonly used preprocessing normalization, normalization to trough depth (<xref ref-type="bibr" rid="bib76">Kaufman et al., 2010</xref>), we obtained extremely similar results. We found &#177;1 unit trough to peak normalization had virtually the same number of clusters as normalization to trough (<inline-formula><mml:math id="inf7"><mml:mrow><mml:mn>8.29</mml:mn><mml:mo>&#177;</mml:mo><mml:mn>0.84</mml:mn></mml:mrow></mml:math></inline-formula> vs. <inline-formula><mml:math id="inf8"><mml:mrow><mml:mn>8.16</mml:mn><mml:mo>&#177;</mml:mo><mml:mn>0.65</mml:mn></mml:mrow></mml:math></inline-formula> clusters, mean &#177; S.D.; <xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2A and C</xref>). 
Furthermore, both normalizations picked out the same structure (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2B and D</xref>); the normalization to trough did have a 9th cluster splitting off of &#9316; but this was something also seen with &#177;one unit trough to peak normalization in certain data subsets as well.</p></sec><sec id="s4-9"><title>A step-by-step guide to UMAP and Louvain clustering in <italic>WaveMAP</italic></title><p>To provide the reader with an intuitive overview of <italic>WaveMAP</italic>, we provide a step-by-step exposition of the different stages in the workflow shown in <xref ref-type="fig" rid="fig2">Figure 2</xref> beginning with UMAP followed by Louvain community detection. UMAP is a non-linear method that enables the capture of latent structures in high-dimensional data as a graph. This graph can then be used to visualize the latent structure in a low-dimensional embedding (<xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>). For a detailed description of the methods, please refer to the Supplemental Information or respective references for UMAP (<xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>) and Louvain community detection (<xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>).</p><sec id="s4-9-1"><title><xref ref-type="fig" rid="fig2">Figure 2A&#8211;i</xref></title><p>We pass 625 normalized single-unit waveforms into UMAP. This normalization is crucial for exposing interesting structure in downstream analysis, although the particular normalization is less important (<xref ref-type="fig" rid="fig3s2">Figure 3&#8212;figure supplement 2</xref>). 
UMAP uses a five-step (ii.a to ii.e in <xref ref-type="fig" rid="fig2">Figure 2A</xref>) procedure to construct a weighted high-dimensional graph.</p></sec><sec id="s4-9-2"><title><xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii.a</xref></title><p>In the first step, the data for each waveform is viewed in its original (sometimes called &#8216;ambient&#8217;) 48-dimensional space with each dimension corresponding to one of 48 time points along the waveform recording.</p></sec><sec id="s4-9-3"><title><xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii.b</xref></title><p>A local metric is then assigned to each data point such that a unit ball (distance of one) surrounding it extends to the 1st-nearest neighbor. This ensures that every point is connected to at least one other point.</p></sec><sec id="s4-9-4"><title><xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii.c</xref></title><p>Beyond this first connection, the distances to the next <inline-formula><mml:math id="inf9"><mml:mrow><mml:mo mathvariant="normal" stretchy="false">(</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo mathvariant="normal">-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow><mml:mo mathvariant="normal" stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>-nearest neighbors increases according to an exponential distribution scaled by the local density. 
This is shown as a &#8216;glow&#8217; around each of the unit balls in <xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii</xref>.c.</p></sec><sec id="s4-9-5"><title><xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii.d</xref></title><p>The distances from the local point to the <inline-formula><mml:math id="inf10"><mml:mrow><mml:mi>k</mml:mi><mml:mo mathvariant="normal">-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:math></inline-formula> data points beyond the unit ball are made to be probabilistic (&#8216;fuzzy&#8217;) according to their distance (k = four in <xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii</xref>.d with some low weight connections omitted for clarity). This also means that the metric around each data point has a different notion of &#8216;how far&#8217; their neighbors are. Distances are shorter in dense regions (with respect to the ambient space) than are distances in sparser regions leading to a graph with asymmetric edge weights. If the notion of a probabilistic connection is confusing, this construction can just be understood as an asymmetric directed graph with edge weights between zero and one. 
One way to understand this is through the following real life example: to someone living in a dense city, traveling several miles may seem very far, while for a rural resident, this distance might be trivial even if the absolute distance is the same.</p></sec><sec id="s4-9-6"><title><xref ref-type="fig" rid="fig2">Figure 2A&#8211;ii.e</xref></title><p>The edge weights between any two data points, <inline-formula><mml:math id="inf11"><mml:mi>a</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf12"><mml:mi>b</mml:mi></mml:math></inline-formula>, are &#8216;averaged together&#8217; according to the formula <inline-formula><mml:math id="inf13"><mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo mathvariant="normal">+</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo mathvariant="normal">-</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo mathvariant="normal">&#8901;</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula> known as probabilistic sum. This results in a graph that now has symmetric edge weights.</p></sec><sec id="s4-9-7"><title><xref ref-type="fig" rid="fig2">Figure 2Biv</xref></title><p>However, before we project this graph into lower dimension, we first apply clustering to the high-dimensional graph with a method known as Louvain community detection. This method proceeds in two steps per pass: modularity optimization and community aggregation (<xref ref-type="fig" rid="fig2s1">Figure 2&#8212;figure supplement 1B</xref>).</p></sec><sec id="s4-9-8"><title><xref ref-type="fig" rid="fig2">Figure 2B-iv.a</xref>:</title><p>In the first step of Louvain, each node in the graph is assigned to its own &#8216;community&#8217; which can be interpreted as its own cluster. Next, each node will join a neighboring node&#8217;s community such as to maximize an objective function known as &#8216;modularity score&#8217; (see Supplemental Information for the exact equation). 
This score is maximized when the sum of the weighted edges within a community is maximal relative to the sum of the weighted edges incident on the community from nodes outside the community. This procedure operates on all nodes until modularity can no longer be increased across the network; this concludes the modularity optimization step. In the next step, community aggregation, all nodes in a community are collapsed into a single node to create a new network. This completes the first pass of Louvain which then repeats modularity optimization and aggregation on this new graph until modularity is once again maximized. This continues until modularity no longer increases across hierarchies of graphs. Note that the resolution parameter is set to one in the work of <xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref> but we use an implementation of Louvain that allows for changing of this parameter according to the definition of modularity given in <xref ref-type="bibr" rid="bib89">Lambiotte, 2007</xref>. 
The resolution parameter gives the user some ability to specify how large of a community they expect as might be related to a phenomenon of interest and should be chosen empirically.</p></sec><sec id="s4-9-9"><title><xref ref-type="fig" rid="fig2">Figure 2B-iv.b</xref></title><p>The final Louvain community memberships are propagated back to the original nodes and assigned as labels for the associated data points which completes the classification step.</p></sec></sec><sec id="s4-10"><title><xref ref-type="fig" rid="fig2">Figure 2B&#8211;v</xref></title><p>In the second step of UMAP, graph layout, the high-dimensional graph with symmetric edge weights from the previous step is projected down into some lower dimension (here it is two dimensions).</p><p>To initialize this process, the graph is first passed through a Laplacian eigenmap (<xref ref-type="bibr" rid="bib17">Belkin and Niyogi, 2002</xref>) which helps regularize the initial embedding of the graph in low dimension (<xref ref-type="bibr" rid="bib85">Kobak and Linderman, 2019</xref>).</p><sec id="s4-10-1"><title><xref ref-type="fig" rid="fig2">Figure 2B&#8211;v.a</xref></title><p>We know the graph in high dimension but we have&#160;not yet found this graph in low dimension. From here a force directed graph layout procedure is used to align the initialized low-dimensional graph with the one found in high dimension. The force directed graph layout procedure minimizes the difference in distances in the instantiated graph compared to the high-dimensional one by alternatingly applying an attractive and a repulsive force according to the edge weights. These forces are chosen to minimize the cross-entropy between the graphs. 
This process ensures that points close in high dimension but far in low dimension are brought together (attraction) and those that are far in high dimension but close in low dimension are pushed apart (repulsion).</p></sec><sec id="s4-10-2"><title><xref ref-type="fig" rid="fig2">Figure 2B&#8211;v.b</xref></title><p>A final embedding of the data is found using stochastic gradient descent but by fixing the seed in our procedure, we enforce standard gradient descent. Although this requires more memory and is less performant, it guarantees that embeddings will look the same (even if this doesn&#8217;t affect clustering).</p></sec><sec id="s4-10-3"><title><xref ref-type="fig" rid="fig2">Figure 2B&#8211;vi</xref></title><p>In the final step of our method, we combine the labels found through Louvain clustering with a low-dimensional embedding to arrive at our <italic>WaveMAP</italic> solution.</p></sec></sec><sec id="s4-11"><title>WaveMAP parameter selection and validation</title><p>The normalized extracellular waveforms were passed into the Python package umap 0.4.0rc3 (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_018217">SCR_018217</ext-link>) (<xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>) with the parameters shown in <xref ref-type="table" rid="table1">Table 1</xref>. The n_neighbors value was increased to 20 to induce more emphasis on global structure. UMAP utilizes a stochastic k-nearest neighbor search to establish the graph and stochastic gradient descent to arrive at the embedding thus it produces similar but different embeddings in the projected space. For reproducibility reasons, the random_state was fixed in the algorithm and in numpy. The choice of random seed only impacted the projection and not the clustering (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>). 
From here, the graph provided by umap.graph_ was passed into the Louvain community detection algorithm to generate the clustering seen in <xref ref-type="fig" rid="fig3">Figure 3A</xref>. For details of the UMAP algorithm, see Supplementary Information.</p><p>Graph networks are often hierarchical and it has been recommended that the Louvain resolution parameter be chosen to elicit the phenomenon of interest (<xref ref-type="bibr" rid="bib131">Porter et al., 2009</xref>; <xref ref-type="bibr" rid="bib90">Lambiotte et al., 2008</xref>). To select the resolution parameter <inline-formula><mml:math id="inf14"><mml:mi>t</mml:mi></mml:math></inline-formula>, we chose a value that best maximized modularity score (a measure of the ratio between connections within a cluster vs. incoming from outside of it; see Supplementary Information) while still returning an statistically analyzable number of clusters (n &gt; 20). We selected a resolution parameter (green marker on <xref ref-type="fig" rid="fig3">Figure 3B</xref>) that maximized modularity score of Louvain clustering while still returning clusters of <inline-formula><mml:math id="inf15"><mml:mrow><mml:mi>n</mml:mi><mml:mo>&gt;</mml:mo><mml:mn>20</mml:mn></mml:mrow></mml:math></inline-formula> to allow for downstream statistical analyses. We note that this was also very close to the &#8216;elbow&#8217; in terms of number of clusters; this verifies that we have reached near-optimality in a second sense of obtaining stable cluster number. These scores were calculated over 25 random UMAP instantiations of 80% of the full dataset in 5% intervals. 
For algorithmic details of Louvain clustering, see Supplementary Information.</p><p>To validate that our parameter selection was stable and produced the same number of clusters reliably, we used a bootstrap and applied the <italic>WaveMAP</italic> procedure to random subsets of the full dataset (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, <xref ref-type="bibr" rid="bib167">Tibshirani and Walther, 2005</xref>). We obtained 100 random samples from 10% to 90% of the full data set in 10% increments while simultaneously choosing a different random seed for the UMAP algorithm each time. We calculated both the number of Louvain clusters and the adjusted mutual information score (AMI) across these random samples and plot it on the same graph <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, red and green. The AMI is a measure of how much &#8216;information&#8217; is shared between a pair of clusterings with information specifically as Shannon entropy. 
The Shannon entropy of a given clustering (often called a &#8216;partitioning&#8217;), <inline-formula><mml:math id="inf16"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mstyle></mml:math></inline-formula>, is defined as,<disp-formula id="equ1"><mml:math id="m1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mo>&#8722;</mml:mo><mml:munderover><mml:mo>&#8721;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mi>log</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mstyle></mml:math></disp-formula>with the probability <inline-formula><mml:math id="inf17"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mstyle></mml:math></inline-formula> pertaining to the probability that a certain data point <inline-formula><mml:math id="inf18"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mstyle></mml:math></inline-formula> will belong to a certain cluster. 
The clustering entropy can be understood as how &#8216;unpredictable&#8217; the results of a random variable are (in this case the random variable is the particular clustering solution). For instance, a fair coin is less predictable (greater entropy) than a loaded coin (lower entropy).</p><p>Intuitively, AMI is how much information we receive about one variable given an observation of another and vice versa (<xref ref-type="bibr" rid="bib168">Timme and Lapish, 2018</xref>) (or how much knowing one clustering allows us to predict another) corrected for random chance. Thus, it&#8217;s bounded between 0 and 1 with 0 for two completely independent variables and one for completely identical variables. Formally, un-adjusted mutual information, <inline-formula><mml:math id="inf19"><mml:mrow><mml:mi>I</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>X</mml:mi><mml:mo>,</mml:mo><mml:mi>Y</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula>, is defined as,<disp-formula id="equ2"><mml:math id="m2"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>I</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>X</mml:mi><mml:mo>,</mml:mo><mml:mi>Y</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#8722;</mml:mo><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>X</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>Y</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mo>=</mml:mo><mml:msub><mml:mi>D</mml:mi><mml:mrow><mml:mi>K</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>X</mml:mi><mml:mo>,</mml:mo><mml:mi>Y</mml:mi></mml:mrow></mml:msub><mml:mo 
stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mspace width="thickmathspace"/><mml:mo fence="false" stretchy="false">&#8214;</mml:mo><mml:mspace width="thickmathspace"/><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#8901;</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>Y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula>where <inline-formula><mml:math id="inf20"><mml:mrow><mml:mi>H</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi><mml:mo lspace="2.5pt" rspace="2.5pt" stretchy="false">|</mml:mo><mml:mi>Y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is the conditional Shannon entropy, <inline-formula><mml:math id="inf21"><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>X</mml:mi><mml:mo>,</mml:mo><mml:mi>Y</mml:mi></mml:mrow></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is the joint distribution of <inline-formula><mml:math id="inf22"><mml:mi>X</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf23"><mml:mi>Y</mml:mi></mml:math></inline-formula>, and <inline-formula><mml:math id="inf24"><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is a marginal distribution.</p><p>The value at 100% is omitted 
because it has the same cluster number as our dataset and zero variance since <italic>WaveMAP</italic> is invariant to random seed selection. Thus the variation in cluster number due to sampling and random seed are compounded and shown together in <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>.</p><p>Ensemble clustering for graphs (ECG) (<xref ref-type="bibr" rid="bib132">Poulin and Th&#233;berge, 2018</xref>; <xref ref-type="bibr" rid="bib133">Poulin and Th&#233;berge, 2019</xref>) was used to validate the clusters found in <xref ref-type="fig" rid="fig3">Figure 3A</xref> (see <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1C</xref>). We added the algorithm (<ext-link ext-link-type="uri" xlink:href="https://github.com/ftheberge/Ensemble-Clustering-for-Graphs">https://github.com/ftheberge/Ensemble-Clustering-for-Graphs</ext-link>; <xref ref-type="bibr" rid="bib165">Th&#233;berge, 2020</xref>) into the python-igraph package (<xref ref-type="bibr" rid="bib36">Csardi and Nepusz, 2006</xref>) and passed UMAP graphs into it directly. We set the number of partitions <inline-formula><mml:math id="inf25"><mml:mi>k</mml:mi></mml:math></inline-formula> to be 10 to produce the plot in <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1C</xref>. This algorithm uses <inline-formula><mml:math id="inf26"><mml:mi>k</mml:mi></mml:math></inline-formula> different randomized instantiations of the clusters in the graph followed by one round of Louvain clustering (<xref ref-type="fig" rid="fig2s1">Figure 2&#8212;figure supplement 1B</xref>). 
Each of these <inline-formula><mml:math id="inf27"><mml:mi>k</mml:mi></mml:math></inline-formula> level-1 graphs (called level-1 partitions since one round of Louvain was performed) are then combined as a single graph such that when edges co-occur between nodes in one of the <inline-formula><mml:math id="inf28"><mml:mi>k</mml:mi></mml:math></inline-formula> graphs, it is more heavily weighted. This ensembling of several graphs via the weight function <inline-formula><mml:math id="inf29"><mml:msub><mml:mi>W</mml:mi><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi></mml:msub></mml:math></inline-formula> (see Supplemental Materials and methods section <italic>Ensemble clustering for graphs (ECG)</italic>) yields the final ECG graph.</p></sec><sec id="s4-12"><title>Gradient boosted decision tree classifier</title><p>We then trained a gradient boosted decision tree classifier in xgboost 1.0.2 (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_021361">SCR_021361</ext-link>) (<xref ref-type="bibr" rid="bib27">Chen and Guestrin, 2016</xref>). A 30&#8211;70% test-train split was used with the test set never seen by model training or hyperparameter tuning. A 5-fold cross-validation was applied to the training data and optimal hyperparameters were obtained after a grid search on the folds using scikit-learn&#8217;s (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_002577">SCR_002577</ext-link>) GridSearchCV function with final parameters in <xref ref-type="table" rid="table1">Table 1</xref>. The default multi-class objective function multi:softmax was also used. 
The percent accuracy for each cluster against all others is plotted as a confusion matrix in <xref ref-type="fig" rid="fig3">Figure 3C</xref> by applying the final classifier model to the unseen test set.</p><p>The same procedure was used when training on the GMM labels found in <xref ref-type="fig" rid="fig4">Figure 4D</xref> and for the eight cluster GMM labels in <xref ref-type="fig" rid="fig4s2">Figure 4&#8212;figure supplement 2B</xref>. Each of these classifiers also separately underwent hyperparameter tuning using scikit-learn&#8217;s GridSearchCV function as well with final hyperparameters shown in 2.</p><p>It is important to note that cross-validation was done after the cluster labels were generated by looking at the entire dataset (both via the algorithm itself and our tuning of parameters). This results in data leakage (<xref ref-type="bibr" rid="bib117">Moscovich and Rosset, 2019</xref>; <xref ref-type="bibr" rid="bib77">Kaufman et al., 2011</xref>) which potentially hurts out-of-dataset performance. Thus, classifier performance is only used here to demonstrate UMAP&#8217;s ability to sensibly separate waveforms within-dataset relative to traditional GMM methods (<xref ref-type="fig" rid="fig4">Figure 4D</xref>). <xref ref-type="fig" rid="fig3">Figure 3C</xref> is not leveraged to provide firm insight into how such a classifier would perform out-of-dataset. It is also important to note that none of the parameters for <italic>WaveMAP</italic> (n_neighbors or resolution) were tuned to optimize for classifier performance and thus the direction of bias is not necessarily deleterious.</p></sec><sec id="s4-13"><title>Specified waveform shape features</title><p>To compute specified features for each normalized waveforms (<xref ref-type="fig" rid="fig4">Figure 4A</xref>), we first up-sampled the waveforms from 48 to 480 time points using a cubic spline interpolation method. 
We then used this up-sampled waveform to compute three separate features: trough to peak duration, AP width, and peak ratio. Trough to peak is the time from the bottom of the depolarization trough (global minimum) to the post-hyperpolarization peak (subsequent local maximum). AP width was calculated as the width of the depolarization trough at the full-width half-minimum point. Both these measures were found using the mwave function from the MLIB 1.7.0.0 toolbox (<xref ref-type="bibr" rid="bib160">Stuttgen, 2019</xref>). Peak ratio was the ratio of heights (above baseline) between the pre-hyperpolarization (maximum before trough) and the post-hyperpolarization peak (maximum after trough).</p></sec><sec id="s4-14"><title>Gaussian mixture model clustering</title><p>Using the specified feature values (trough to peak, AP width, and peak ratio), the normalized waveforms were clustered in the three-dimensional feature space using a Gaussian mixture model (GMM) with hard-assignment (each data point belongs to one cluster) through MATLAB&#8217;s fitgmdist function across 50 replicates (<xref ref-type="fig" rid="fig4">Figure 4B</xref>). 
Each replicate is a different random instantiation of the GMM algorithm and the model with the largest log likelihood is chosen.</p><p>The Bayesian information criterion (BIC) was used to determine the optimal cluster number and is defined as<disp-formula id="equ3"><label>(1)</label><mml:math id="m3"><mml:mrow><mml:mi>BIC</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mi>ln</mml:mi><mml:mo>&#8289;</mml:mo><mml:mi>P</mml:mi></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi><mml:mo lspace="2.5pt" rspace="2.5pt" stretchy="false">|</mml:mo><mml:mi>&#952;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi>K</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mi>ln</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where the first term <inline-formula><mml:math id="inf30"><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mi>ln</mml:mi><mml:mo>&#8289;</mml:mo><mml:mi>P</mml:mi></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi><mml:mo lspace="2.5pt" rspace="2.5pt" stretchy="false">|</mml:mo><mml:mi>&#952;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula> is a &#8216;goodness of fit&#8217; term obtained from the negative log likelihood function, that&#160;is, the conditional probability of observing the sample <inline-formula><mml:math id="inf31"><mml:mi>X</mml:mi></mml:math></inline-formula> given a vector of parameters <inline-formula><mml:math id="inf32"><mml:mi>&#952;</mml:mi></mml:math></inline-formula>. 
In the particular case of GMM, the function <inline-formula><mml:math id="inf33"><mml:mrow><mml:mi>P</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi><mml:mo lspace="2.5pt" rspace="2.5pt" stretchy="false">|</mml:mo><mml:mi>&#952;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is the probability of observing the clusters given an underlying particular sum of multivariate Gaussians (the likelihood). The second term <inline-formula><mml:math id="inf34"><mml:mrow><mml:mi>K</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mi>ln</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula> is a penalty on the number of parameters <inline-formula><mml:math id="inf35"><mml:mi>n</mml:mi></mml:math></inline-formula> which approximates model complexity. Penalizing the number of model parameters (number of clusters <inline-formula><mml:math id="inf36"><mml:mi>K</mml:mi></mml:math></inline-formula>) scaled by the number of data points <inline-formula><mml:math id="inf37"><mml:mi>n</mml:mi></mml:math></inline-formula> captures the idea that &#8216;simplicity is better&#8217;. 
This criterion ultimately constrains the number of Gaussians used to fit the data.</p><p>Assuming we have <inline-formula><mml:math id="inf38"><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub></mml:math></inline-formula> features and <inline-formula><mml:math id="inf39"><mml:msub><mml:mi>N</mml:mi><mml:mi>c</mml:mi></mml:msub></mml:math></inline-formula> clusters we can calculate <inline-formula><mml:math id="inf40"><mml:mi>K</mml:mi></mml:math></inline-formula> using the following framework: For each Gaussian mixture model, the total number of parameters is <inline-formula><mml:math id="inf41"><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub></mml:math></inline-formula> means and <inline-formula><mml:math id="inf42"><mml:mrow><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:math></inline-formula> covariance parameters. Another free parameter that is learned is the weight for each Gaussian that sums up to 1, leaving us with <inline-formula><mml:math id="inf43"><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>c</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:math></inline-formula> unique weights. 
Thus the <inline-formula><mml:math id="inf44"><mml:mi>K</mml:mi></mml:math></inline-formula> which is the effective number of parameters for a GMM is,<disp-formula id="equ4"><label>(2)</label><mml:math id="m4"><mml:mrow><mml:mi>K</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>c</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="160%" minsize="160%">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>f</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mo maxsize="160%" minsize="160%">)</mml:mo></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mi>c</mml:mi></mml:msub></mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mrow></mml:math></disp-formula></p><p>The &#8216;best&#8217; model in a BIC-sense will have the set of parameters <inline-formula><mml:math id="inf45"><mml:mi>&#952;</mml:mi></mml:math></inline-formula> maximizing the likelihood function (thus minimizing the negative log likelihood) for a given model or model family&#8212;a number of multivariate Gaussians in a three-dimensional feature space in this case. To arrive at the parameters best approximating the Gaussian distribution giving rise to the data (Maximum Likelihood Estimation or MLE), the Expectation-Maximization (EM) algorithm was used. 
The optimal cluster number was selected as the lowest number of clusters between 1 and 10 at which the change in BIC was minimized (at the &#8216;elbow&#8217; in <xref ref-type="fig" rid="fig4">Figure 4C</xref>).</p></sec><sec id="s4-15"><title>Interpretable machine learning: UMAP inverse transform and SHAP</title><p>To facilitate interpretability, we used the invertibility of the UMAP transform (which itself is based on Delaunay triangulation) to generate test waveforms tiling the projected space <xref ref-type="fig" rid="fig5">Figure 5A</xref>. 100 evenly-spaced test coordinates were generated spanning a portion of the embedded space and passed backwards through the UMAP transform using umap&#8217;s built-in inverse_transform function. The waveform generated at each test point is shown color-coded to the nearest cluster color or in gray if the distance exceeds 0.5 units in UMAP space.</p><p>Using the package shap (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_021362">SCR_021362</ext-link>;&#160;<xref ref-type="bibr" rid="bib97">Lundberg et al., 2020</xref>), SHAP values were calculated for the classifier trained on <italic>WaveMAP</italic> identified clusters. The trained XGBoost model was passed directly into the tree model-specific shap.TreeExplainer (<xref ref-type="bibr" rid="bib96">Lundberg et al., 2018</xref>) which then calculated the mean absolute SHAP values (the average impact on model performance, positive or negative) for all waveform time points (features). TreeExplainer assigned SHAP values for every time point class-by-class and these were used to generate the class-specific SHAP plots (<xref ref-type="fig" rid="fig5">Figure 5C</xref>). 
The SHAP values for each time point, across classes, was summed to generate the overall SHAP values for each time point (<xref ref-type="fig" rid="fig5">Figure 5B</xref>).</p></sec><sec id="s4-16"><title>Choice-selective signal</title><p>We use an approach developed in <xref ref-type="bibr" rid="bib110">Meister et al., 2013</xref> to estimate the choice-selective signal. We chose such an approach because decision-related activity of PMd neurons does not simply increase or decrease in firing rate and often shows considerable temporal modulation. We estimated for each neuron a choice-selective signal on a time point-by-time point basis as absolute value of the firing rate difference between left and right choice trials (|left - right|) or equivalently PREF-NONPREF. We use this choice-selective signal to understand choice-related dynamics and estimate discrimination time.</p></sec><sec id="s4-17"><title>Choice-related dynamics</title><p>To understand the dynamics of the choice-selectivity signal as a function of the unsigned checkerboard coherence, we performed the following analysis. As described above, we first estimated the choice-selectivity signal in spikes/s for each neuron and each checkerboard coherence as shown for example in <xref ref-type="fig" rid="fig7">Figure 7A,B</xref>. We then estimated the slope of this choice-selectivity signal in the 175&#8211;325 ms&#160;period after checkerboard onset. Repeating this analysis for each color coherence provided us with an estimate of the rate of change of the choice selectivity signal (<inline-formula><mml:math id="inf46"><mml:mi>&#951;</mml:mi></mml:math></inline-formula>) for all the coherences in spikes/s/s. Averaging over neurons for each cluster provided us with the graphs in <xref ref-type="fig" rid="fig7">Figure 7C,D</xref>. 
We then estimated the dependence of <inline-formula><mml:math id="inf47"><mml:mi>&#951;</mml:mi></mml:math></inline-formula> on color coherence by regressing <inline-formula><mml:math id="inf48"><mml:mi>&#951;</mml:mi></mml:math></inline-formula> and color coherence to estimate how strongly choice-selectivity signals in a particular cluster were modulated by the stimulus input. This modulation is summarized in <xref ref-type="fig" rid="fig7">Figure 7E</xref> and measured as &#8216;coherence slope&#8217; in units of spikes/s/s/% color coherence.</p></sec><sec id="s4-18"><title>Discrimination time</title><p>We identified the discrimination time, that is the time at which the neuron demonstrated significant choice selectivity, on a neuron-by-neuron basis. We compared the choice-selective signal at each point to the 95th percentile of the bootstrap estimates of baseline choice-selective signal (i.e. before checkerboard stimulus onset). We enforced the condition that the choice-selective signal should be significantly different from the baseline for at least 25 ms after this first identified time to be included as an estimate of a time of significant choice selectivity for that neuron. Using longer windows provided very similar results.</p></sec><sec id="s4-19"><title>Experimental subjects (anatomical data)</title><p>Archived tissues were harvested from six young rhesus macaques of both sexes (9 &#177; 1.13 years, <italic>Macaca mulatta</italic>). These subjects were close in age to the macaques used in the main study and were part of a larger program of studies on aging and cognition led by Dr. Douglas Rosene. 
These monkeys were obtained from the Yerkes National Primate Center (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_001914">SCR_001914</ext-link>) and housed individually in the Laboratory Animal Science Center at the Boston University School of Medicine; these facilities are fully accredited by the Association for Assessment and Accreditation of Laboratory Animal Care (AAALAC). Research was conducted in strict accordance with the guidelines of the National Institutes of Health&#8217;s Guide for the Care and Use of Laboratory Animals and Public Health Service Policy on the Humane Care and Use of Laboratory Animals.</p></sec><sec id="s4-20"><title>Perfusion and fixation</title><p>All brain tissue for histological studies was fixed and harvested using our well-established two-stage perfusion protocol as described (<xref ref-type="bibr" rid="bib108">Medalla and Luebke, 2015</xref>). After sedation with ketamine hydrochloride (10 mg/ml) and deep anesthetization with sodium pentobarbital (to effect, 15 mg/kg i.v.), monkeys were perfused through the ascending aorta with ice-cold Krebs&#8211;Henseleit buffer containing (in mM): 6.4 <inline-formula><mml:math id="inf49"><mml:mrow><mml:msub><mml:mi>Na</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>HPO</mml:mi><mml:mn>4</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, 1.4 <inline-formula><mml:math id="inf50"><mml:mrow><mml:msub><mml:mi>Na</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>PO</mml:mi><mml:mn>4</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, 137.0 NaCl, 2.7 KCl, and 1.0 <italic>MgCl</italic><sub>2</sub> at pH 7.4 (Sigma-Aldrich) followed by fixation with 4% paraformaldehyde in 0.1M phosphate buffer (PB, pH 7.4, 37&#176;C). 
The fixed brain sample was blocked, in situ, in the coronal plane, removed from the skull and cryoprotected in a series of glycerol solutions, and flash frozen in &#8722;70&#176;C isopentane (<xref ref-type="bibr" rid="bib141">Rosene et al., 1986</xref>). The brain was cut on a freezing microtome in the coronal plane at 30 &#181;m and sections were systematically collected into 10 matched series and stored in cryoprotectant (15% glycerol, in 0.1M PB, pH 7.4) at &#8722;80&#176;C (<xref ref-type="bibr" rid="bib53">Estrada et al., 2017</xref>).</p></sec><sec id="s4-21"><title>Immunohistochemistry</title><p>To assess the laminar distribution of interneurons, we batch processed 30 &#181;m coronal sections through the rostral dorsal premotor cortex area (PMdr) from six specimens. Sections were immunolabelled for inhibitory neuronal subtypes based on their expression of calcium binding proteins, calbindin (CB), calretinin (CR), and parvalbumin (PV), which label non-overlapping populations in primates (<xref ref-type="bibr" rid="bib39">DeFelipe, 1997</xref>). Free floating sections were first rinsed (3 x 10 min, 4&#176;C) in 0.01M phosphate-buffered saline (PBS) and incubated in 50 mM glycine for 1 hr at 4&#176;C. Sections were then rinsed in 0.01M PBS (3 x 10 min, 4&#176;C), and antigen retrieval was performed with 10 mM sodium citrate (pH 8.5) in a 60&#8211;70&#176;C water bath for 20 min. Sections were then rinsed in 0.01M PBS (3 x 10 min, 4&#176;C) and incubated in pre-block (0.01M PBS, 5% bovine serum albumin [BSA], 5% normal donkey serum [NDS], 0.2% Triton X-100) to reduce any non-specific binding of secondary antibodies. Primary antibodies (<xref ref-type="fig" rid="fig1">Figure 1</xref>) were diluted in 0.1 M PB, 0.2% acetylated BSA (BSA-c), 1% NDS, 0.1% Triton X-100. To increase the penetration of the antibody, two microwave incubation sessions (2 &#215; 10 min at 150 watts) were performed using the Pelco Biowave Pro (Ted Pella), followed by a 48 hr incubation at 4&#176;C with gentle agitation. 
After rinsing (3 x 10 min) in 0.01M PBS at 4&#176;C, sections were co-incubated with secondary antibodies diluted in incubation buffer (see 1), microwaved 2 &#215; 10 min at 150 W, and placed at 4&#176;C for 24 hr with gentle agitation. Sections were then rinsed (3 x 10 min) in 0.1M PB, mounted onto slides and coverslipped with prolong anti-fade gold mounting medium (ThermoFisher) and cured at room temperature in the dark.</p></sec><sec id="s4-22"><title>Confocal microscopy and visualization of immunofluorescent labeling</title><p>Immunofluorescent labeling was imaged using a laser-scanning confocal microscope (Leica SPE) using 488 and 561 nm diode lasers. For each coronal section, two sets of tile scan images of a cortical column, ~200 &#181;m wide and spanning from pia to the white matter boundary, were obtained in the PMdr. This corresponded to the area 6FR in cytoarchitectural maps (<xref ref-type="bibr" rid="bib11">Barbas and Pandya, 1987</xref>; <xref ref-type="bibr" rid="bib115">Morecraft et al., 2004</xref>; <xref ref-type="bibr" rid="bib116">Morecraft et al., 2019</xref>) and area F7 in several functional maps (<xref ref-type="bibr" rid="bib104">Matelli and Luppino, 1996</xref>; <xref ref-type="bibr" rid="bib138">Rizzolatti et al., 1998</xref>). The two columns were spaced 200 &#181;m apart. All images were acquired using a plan apochromat 40x/1.3 NA oil-immersion objective at a resolution of 0.268 x 0.268 x 0.5 &#181;m voxel size. 
The resulting image stacks were deconvolved and converted to 8-bit images using AutoQuant (Media Cybernetics; RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_002465">SCR_002465</ext-link>) to improve the signal to noise ratio (<xref ref-type="bibr" rid="bib108">Medalla and Luebke, 2015</xref>).</p></sec><sec id="s4-23"><title>Stereological cell counting</title><p>Due to its demonstrated ability in producing minimally-biased results, 3D stereologic cell counting (<xref ref-type="bibr" rid="bib146">Schmitz et al., 2014</xref>) was utilized to count parvalbumin- (PV<sup>+</sup>), calretinin- (CR<sup>+</sup>) and calbindin-positive (CB<sup>+</sup>) cells. Using the CellCounter plugin in Fiji (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_002285">SCR_002285</ext-link>) (<xref ref-type="bibr" rid="bib145">Schindelin et al., 2012</xref>) on each image stack after maximum intensity projection, the inhibitory cells were counted slice by slice, recognized by their round shape (as opposed to pyramids), lack of apical dendrite, and relatively high uniform intensity. Cells at the bottom slice of each image stack and touching the left image border were excluded to avoid double-counting.</p></sec><sec id="s4-24"><title>Statistics</title><p>All statistical tests (Kolmogorov-Smirnov, Kruskal-Wallis, and Mann-Whitney <italic>U</italic>) were conducted using the package scipy.stats (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_008058">SCR_008058</ext-link>) (<xref ref-type="bibr" rid="bib147">SciPy 1.0 Contributors et al., 2020</xref>). Multiple comparisons were corrected for using false discovery rate-adjusted p-values (Benjamini-Hochberg); this was done using statsmodels.stats.multitest and scikit-posthocs (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_021363">SCR_021363</ext-link>) (<xref ref-type="bibr" rid="bib164">Terpilowski, 2019</xref>). 
Ordinary least squares regressions were conducted in the package statsmodels (RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/SCR_016074">SCR_016074</ext-link>) (<xref ref-type="bibr" rid="bib148">Seabold and Perktold, 2010</xref>). Bootstrapped standard errors of the median were calculated by taking 5000 random samples with replacement (a bootstrap) of a dataset and then the standard deviation of each bootstrap was taken. Effect sizes were given as adjusted <inline-formula><mml:math id="inf51"><mml:msup><mml:mi>R</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:math></inline-formula> values or Cohen&#8217;s <inline-formula><mml:math id="inf52"><mml:msup><mml:mi>f</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:math></inline-formula> (of a one-way ANOVA) using statsmodels.formula.api.ols and statsmodels.stats.oneway respectively.</p></sec></sec></body><back><ack id="ack"><title>Acknowledgements</title><p>We thank M Noichl and L McInnes for their suggestions on graph community detection and UMAP visualization. We also thank Dr. Kathleen Rockland, Dr. Jennifer Luebke, and Dr. 
Jonathan Kao for detailed comments on the previous versions of the manuscript.</p></ack><sec id="s5" sec-type="additional-information"><title>Additional information</title><fn-group content-type="competing-interest"><title>Competing interests</title><fn fn-type="COI-statement" id="conf1"><p>No competing interests declared</p></fn></fn-group><fn-group content-type="author-contribution"><title>Author contributions</title><fn fn-type="con" id="con1"><p>Conceptualization, Software, Formal analysis, Validation, Investigation, Methodology, Writing - original draft, Project administration, Writing - review and editing, Developed the UMAP and Louvain Clustering method combination that ultimately became WaveMAP and finally the interpretable machine learning approach</p></fn><fn fn-type="con" id="con2"><p>Data curation, Formal analysis, Investigation, Methodology</p></fn><fn fn-type="con" id="con3"><p>Data curation, Methodology, Writing - review and editing</p></fn><fn fn-type="con" id="con4"><p>Data curation, Formal analysis</p></fn><fn fn-type="con" id="con5"><p>Formal analysis, Investigation, Writing - review and editing, Prof. Medalla led the immunohistochemistry portion of the work in close collaboration with Mr. Kenji Lee, and Ms. 
Alexandra Tsolias</p></fn><fn fn-type="con" id="con6"><p>Resources, Funding acquisition, Writing - review and editing</p></fn><fn fn-type="con" id="con7"><p>Conceptualization, Data curation, Supervision, Funding acquisition, Investigation, Methodology, Writing - original draft, Project administration, Writing - review and editing, Trained monkeys and recorded in PMd using multi-contact electrodes and provided advice on the development of the WaveMAP approach</p></fn></fn-group><fn-group content-type="ethics-information"><title>Ethics</title><fn fn-type="other"><p>Animal experimentation: This study was performed in strict accordance with the recommendations in the Guide for the Care and Use of Laboratory Animals of the National Institutes of Health. All of the procedures were approved by the Stanford Administrative Panel on Laboratory Animal Care (APLAC, Protocol Number 8856, entitled "Cortical Processing of Arm Movements"). Surgical procedures were performed under anesthesia, and every effort was made to minimize suffering. 
Appropriate analgesia, pain relief, and antibiotics were administered to the animals when needed after surgical approval.</p></fn></fn-group></sec><sec id="s6" sec-type="supplementary-material"><title>Additional files</title><supplementary-material id="scode1"><label>Source code 1.</label><caption><title>MATLAB code and Python notebook for replicating all figures and figure supplements in the manuscript.</title></caption><media mime-subtype="zip" mimetype="application" xlink:href="elife-67490-code1-v4.zip"/></supplementary-material><supplementary-material id="transrepform"><label>Transparent reporting form</label><media mime-subtype="docx" mimetype="application" xlink:href="elife-67490-transrepform-v4.docx"/></supplementary-material></sec><sec id="s7" sec-type="data-availability"><title>Data availability</title><p>Data generated or analysed during this study are included in the linked Dryad repository (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5061/dryad.z612jm6cf">https://doi.org/10.5061/dryad.z612jm6cf</ext-link>). 
Source data for all figures are also in this zip file.</p><p>The following dataset was generated:</p><p><element-citation id="dataset1" publication-type="data" specific-use="isSupplementedBy"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>EK</given-names></name><name><surname>Balasubramanian</surname><given-names>H</given-names></name><name><surname>Tsolias</surname><given-names>A</given-names></name><name><surname>Anakwe</surname><given-names>S</given-names></name><name><surname>Medalla</surname><given-names>M</given-names></name><name><surname>Shenoy</surname><given-names>K</given-names></name><name><surname>Chandrasekaran</surname><given-names>C</given-names></name></person-group><year iso-8601-date="2021">2021</year><data-title>WaveMAP analysis of extracellular waveforms from monkey premotor cortex during decision-making</data-title><source>Dryad Digital Repository</source><pub-id assigning-authority="Dryad" pub-id-type="doi">10.5061/dryad.z612jm6cf</pub-id></element-citation></p></sec><ref-list><title>References</title><ref id="bib1"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Aggarwal</surname> <given-names>CC</given-names></name><name><surname>Hinneburg</surname> <given-names>A</given-names></name><name><surname>Keim</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="2001">2001</year><chapter-title>On the surprising behavior of distance metrics in high dimensional space</chapter-title><person-group person-group-type="editor"><name><surname>Bussche</surname> <given-names>J</given-names></name><name><surname>Anden</surname> <given-names>V</given-names></name><name><surname>Vianu</surname> <given-names>V. 
ictor</given-names></name></person-group><source>Database Theory &#8212; ICDT 2001</source><publisher-loc>Heidelberg, Berlin</publisher-loc><publisher-name>Springer</publisher-name><fpage>420</fpage><lpage>434</lpage><pub-id pub-id-type="doi">10.1007/3-540-44503-X</pub-id></element-citation></ref><ref id="bib2"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname> <given-names>M</given-names></name><name><surname>Jones</surname> <given-names>MW</given-names></name><name><surname>Xie</surname> <given-names>X</given-names></name><name><surname>Williams</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>TimeCluster: dimension reduction applied to temporal data for visual analytics</article-title><source>The Visual Computer</source><volume>35</volume><fpage>1013</fpage><lpage>1026</lpage><pub-id pub-id-type="doi">10.1007/s00371-019-01673-y</pub-id></element-citation></ref><ref id="bib3"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Amatrudo</surname> <given-names>JM</given-names></name><name><surname>Weaver</surname> <given-names>CM</given-names></name><name><surname>Crimins</surname> <given-names>JL</given-names></name><name><surname>Hof</surname> <given-names>PR</given-names></name><name><surname>Rosene</surname> <given-names>DL</given-names></name><name><surname>Luebke</surname> <given-names>JI</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Influence of highly distinctive structural properties on the excitability of pyramidal neurons in monkey visual and prefrontal cortices</article-title><source>Journal of Neuroscience</source><volume>32</volume><fpage>13644</fpage><lpage>13660</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.2581-12.2012</pub-id><pub-id pub-id-type="pmid">23035077</pub-id></element-citation></ref><ref id="bib4"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Ardid</surname> <given-names>S</given-names></name><name><surname>Vinck</surname> <given-names>M</given-names></name><name><surname>Kaping</surname> <given-names>D</given-names></name><name><surname>Marquez</surname> <given-names>S</given-names></name><name><surname>Everling</surname> <given-names>S</given-names></name><name><surname>Womelsdorf</surname> <given-names>T</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Mapping of functionally characterized cell classes onto canonical circuit operations in primate prefrontal cortex</article-title><source>Journal of Neuroscience</source><volume>35</volume><fpage>2975</fpage><lpage>2991</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.2700-14.2015</pub-id><pub-id pub-id-type="pmid">25698735</pub-id></element-citation></ref><ref id="bib5"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arikuni</surname> <given-names>T</given-names></name><name><surname>Watanabe</surname> <given-names>K</given-names></name><name><surname>Kubota</surname> <given-names>K</given-names></name></person-group><year iso-8601-date="1988">1988</year><article-title>Connections of area 8 with area 6 in the brain of the macaque monkey</article-title><source>The Journal of Comparative Neurology</source><volume>277</volume><fpage>21</fpage><lpage>40</lpage><pub-id pub-id-type="doi">10.1002/cne.902770103</pub-id></element-citation></ref><ref id="bib6"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Azodi</surname> <given-names>CB</given-names></name><name><surname>Tang</surname> <given-names>J</given-names></name><name><surname>Shiu</surname> <given-names>SH</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Opening the black box: interpretable machine learning for 
geneticists</article-title><source>Trends in Genetics</source><volume>36</volume><fpage>442</fpage><lpage>455</lpage><pub-id pub-id-type="doi">10.1016/j.tig.2020.03.005</pub-id><pub-id pub-id-type="pmid">32396837</pub-id></element-citation></ref><ref id="bib7"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Azouz</surname> <given-names>R</given-names></name><name><surname>Gray</surname> <given-names>CM</given-names></name><name><surname>Nowak</surname> <given-names>LG</given-names></name><name><surname>McCormick</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="1997">1997</year><article-title>Physiological properties of inhibitory interneurons in cat striate cortex</article-title><source>Cerebral Cortex</source><volume>7</volume><fpage>534</fpage><lpage>545</lpage><pub-id pub-id-type="doi">10.1093/cercor/7.6.534</pub-id><pub-id pub-id-type="pmid">9276178</pub-id></element-citation></ref><ref id="bib8"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bakkum</surname> <given-names>DJ</given-names></name><name><surname>Frey</surname> <given-names>U</given-names></name><name><surname>Radivojevic</surname> <given-names>M</given-names></name><name><surname>Russell</surname> <given-names>TL</given-names></name><name><surname>M&#252;ller</surname> <given-names>J</given-names></name><name><surname>Fiscella</surname> <given-names>M</given-names></name><name><surname>Takahashi</surname> <given-names>H</given-names></name><name><surname>Hierlemann</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Tracking axonal action potential propagation on a high-density microelectrode array across hundreds of sites</article-title><source>Nature Communications</source><volume>4</volume><elocation-id>2181</elocation-id><pub-id pub-id-type="doi">10.1038/ncomms3181</pub-id><pub-id 
pub-id-type="pmid">23867868</pub-id></element-citation></ref><ref id="bib9"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bala</surname> <given-names>PC</given-names></name><name><surname>Eisenreich</surname> <given-names>BR</given-names></name><name><surname>Yoo</surname> <given-names>SBM</given-names></name><name><surname>Hayden</surname> <given-names>BY</given-names></name><name><surname>Park</surname> <given-names>HS</given-names></name><name><surname>Zimmermann</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Automated markerless pose estimation in freely moving macaques with OpenMonkeyStudio</article-title><source>Nature Communications</source><volume>11</volume><elocation-id>4560</elocation-id><pub-id pub-id-type="doi">10.1038/s41467-020-18441-5</pub-id><pub-id pub-id-type="pmid">32917899</pub-id></element-citation></ref><ref id="bib10"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Banaie Boroujeni</surname> <given-names>K</given-names></name><name><surname>Tiesinga</surname> <given-names>P</given-names></name><name><surname>Womelsdorf</surname> <given-names>T</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Interneuron-specific gamma synchronization indexes cue uncertainty and prediction errors in lateral prefrontal and anterior cingulate cortex</article-title><source>eLife</source><volume>10</volume><elocation-id>e69111</elocation-id><pub-id pub-id-type="doi">10.7554/eLife.69111</pub-id><pub-id pub-id-type="pmid">34142661</pub-id></element-citation></ref><ref id="bib11"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barbas</surname> <given-names>H</given-names></name><name><surname>Pandya</surname> <given-names>DN</given-names></name></person-group><year 
iso-8601-date="1987">1987</year><article-title>Architecture and frontal cortical connections of the premotor cortex (area 6) in the rhesus monkey</article-title><source>The Journal of Comparative Neurology</source><volume>256</volume><fpage>211</fpage><lpage>228</lpage><pub-id pub-id-type="doi">10.1002/cne.902560203</pub-id><pub-id pub-id-type="pmid">3558879</pub-id></element-citation></ref><ref id="bib12"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barry</surname> <given-names>JM</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Axonal activity in vivo: technical considerations and implications for the exploration of neural circuits in freely moving animals</article-title><source>Frontiers in Neuroscience</source><volume>9</volume><elocation-id>153</elocation-id><pub-id pub-id-type="doi">10.3389/fnins.2015.00153</pub-id><pub-id pub-id-type="pmid">25999806</pub-id></element-citation></ref><ref id="bib13"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barth&#243;</surname> <given-names>P</given-names></name><name><surname>Hirase</surname> <given-names>H</given-names></name><name><surname>Monconduit</surname> <given-names>L</given-names></name><name><surname>Zugaro</surname> <given-names>M</given-names></name><name><surname>Harris</surname> <given-names>KD</given-names></name><name><surname>Buzs&#225;ki</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2004">2004</year><article-title>Characterization of neocortical principal cells and interneurons by network interactions and extracellular features</article-title><source>Journal of Neurophysiology</source><volume>92</volume><fpage>600</fpage><lpage>608</lpage><pub-id pub-id-type="doi">10.1152/jn.01170.2003</pub-id><pub-id pub-id-type="pmid">15056678</pub-id></element-citation></ref><ref id="bib14"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Bastos</surname> <given-names>AM</given-names></name><name><surname>Loonis</surname> <given-names>R</given-names></name><name><surname>Kornblith</surname> <given-names>S</given-names></name><name><surname>Lundqvist</surname> <given-names>M</given-names></name><name><surname>Miller</surname> <given-names>EK</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Laminar recordings in frontal cortex suggest distinct layers for maintenance and control of working memory</article-title><source>PNAS</source><volume>115</volume><fpage>1117</fpage><lpage>1122</lpage><pub-id pub-id-type="doi">10.1073/pnas.1710323115</pub-id></element-citation></ref><ref id="bib15"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bean</surname> <given-names>BP</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>The action potential in mammalian central neurons</article-title><source>Nature Reviews Neuroscience</source><volume>8</volume><fpage>451</fpage><lpage>465</lpage><pub-id pub-id-type="doi">10.1038/nrn2148</pub-id><pub-id pub-id-type="pmid">17514198</pub-id></element-citation></ref><ref id="bib16"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Becht</surname> <given-names>E</given-names></name><name><surname>McInnes</surname> <given-names>L</given-names></name><name><surname>Healy</surname> <given-names>J</given-names></name><name><surname>Dutertre</surname> <given-names>C-A</given-names></name><name><surname>Kwok</surname> <given-names>IWH</given-names></name><name><surname>Ng</surname> <given-names>LG</given-names></name><name><surname>Ginhoux</surname> <given-names>F</given-names></name><name><surname>Newell</surname> <given-names>EW</given-names></name></person-group><year 
iso-8601-date="2019">2019</year><article-title>Dimensionality reduction for visualizing single-cell data using UMAP</article-title><source>Nature Biotechnology</source><volume>37</volume><fpage>38</fpage><lpage>44</lpage><pub-id pub-id-type="doi">10.1038/nbt.4314</pub-id></element-citation></ref><ref id="bib17"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Belkin</surname> <given-names>M</given-names></name><name><surname>Niyogi</surname> <given-names>P</given-names></name></person-group><year iso-8601-date="2002">2002</year><chapter-title>Laplacian eigenmaps and spectral techniques for embedding and clustering</chapter-title><person-group person-group-type="editor"><name><surname>Dietterich</surname> <given-names>T. G</given-names></name><name><surname>Becker</surname> <given-names>S</given-names></name><name><surname>Ghahramani</surname> <given-names>Z</given-names></name></person-group><source>Advances in Neural Information Processing Systems</source><volume>14</volume><publisher-name>MIT Press</publisher-name><fpage>585</fpage><lpage>591</lpage></element-citation></ref><ref id="bib18"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Bellet</surname> <given-names>A</given-names></name><name><surname>Habrard</surname> <given-names>A</given-names></name><name><surname>Sebban</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>A survey on metric learning for feature vectors and structured data</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1306.6709">https://arxiv.org/abs/1306.6709</ext-link></element-citation></ref><ref id="bib19"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bengio</surname> <given-names>Y</given-names></name><name><surname>Vincent</surname> 
<given-names>P</given-names></name><name><surname>Paiement</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2003">2003</year><source>Spectral Clustering and Kernel PCA Are Learning Eigenfunctions</source><publisher-loc>London</publisher-loc><publisher-name>Institute for Mathematical Sciences</publisher-name></element-citation></ref><ref id="bib20"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Beyer</surname> <given-names>KS</given-names></name><name><surname>Goldstein</surname> <given-names>J</given-names></name><name><surname>Ramakrishnan</surname> <given-names>R</given-names></name><name><surname>Shaft</surname> <given-names>U</given-names></name></person-group><year iso-8601-date="1999">1999</year><article-title>When is &#8220;Nearest Neighbor&#8221; Meaningful?</article-title><conf-name>Proceedings of the 7th International Conference on Database Theory</conf-name><fpage>217</fpage><lpage>235</lpage></element-citation></ref><ref id="bib21"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Billeh</surname> <given-names>YN</given-names></name><name><surname>Cai</surname> <given-names>B</given-names></name><name><surname>Gratiy</surname> <given-names>SL</given-names></name><name><surname>Dai</surname> <given-names>K</given-names></name><name><surname>Iyer</surname> <given-names>R</given-names></name><name><surname>Gouwens</surname> <given-names>NW</given-names></name><name><surname>Abbasi-Asl</surname> <given-names>R</given-names></name><name><surname>Jia</surname> <given-names>X</given-names></name><name><surname>Siegle</surname> <given-names>JH</given-names></name><name><surname>Olsen</surname> <given-names>SR</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Mihalas</surname> <given-names>S</given-names></name><name><surname>Arkhipov</surname>
<given-names>A</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Systematic integration of structural and functional data into Multi-scale models of mouse primary visual cortex</article-title><source>Neuron</source><volume>106</volume><fpage>388</fpage><lpage>403</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2020.01.040</pub-id><pub-id pub-id-type="pmid">32142648</pub-id></element-citation></ref><ref id="bib22"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Blondel</surname> <given-names>VD</given-names></name><name><surname>Guillaume</surname> <given-names>J-L</given-names></name><name><surname>Lambiotte</surname> <given-names>R</given-names></name><name><surname>Lefebvre</surname> <given-names>E</given-names></name></person-group><year iso-8601-date="2008">2008</year><article-title>Fast unfolding of communities in large networks</article-title><source>Journal of Statistical Mechanics: Theory and Experiment</source><volume>2008</volume><elocation-id>P10008</elocation-id><pub-id pub-id-type="doi">10.1088/1742-5468/2008/10/P10008</pub-id></element-citation></ref><ref id="bib23"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bruce</surname> <given-names>CJ</given-names></name><name><surname>Goldberg</surname> <given-names>ME</given-names></name></person-group><year iso-8601-date="1985">1985</year><article-title>Primate frontal eye fields. I. 
single neurons discharging before saccades</article-title><source>Journal of Neurophysiology</source><volume>53</volume><fpage>603</fpage><lpage>635</lpage><pub-id pub-id-type="doi">10.1152/jn.1985.53.3.603</pub-id><pub-id pub-id-type="pmid">3981231</pub-id></element-citation></ref><ref id="bib24"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chah</surname> <given-names>E</given-names></name><name><surname>Hok</surname> <given-names>V</given-names></name><name><surname>Della-Chiesa</surname> <given-names>A</given-names></name><name><surname>Miller</surname> <given-names>JJ</given-names></name><name><surname>O'Mara</surname> <given-names>SM</given-names></name><name><surname>Reilly</surname> <given-names>RB</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Automated spike sorting algorithm based on laplacian eigenmaps and k-means clustering</article-title><source>Journal of Neural Engineering</source><volume>8</volume><elocation-id>016006</elocation-id><pub-id pub-id-type="doi">10.1088/1741-2560/8/1/016006</pub-id><pub-id pub-id-type="pmid">21248378</pub-id></element-citation></ref><ref id="bib25"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chandrasekaran</surname> <given-names>C</given-names></name><name><surname>Peixoto</surname> <given-names>D</given-names></name><name><surname>Newsome</surname> <given-names>WT</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Laminar differences in decision-related neural activity in dorsal premotor cortex</article-title><source>Nature Communications</source><volume>8</volume><elocation-id>614</elocation-id><pub-id pub-id-type="doi">10.1038/s41467-017-00715-0</pub-id><pub-id pub-id-type="pmid">28931803</pub-id></element-citation></ref><ref id="bib26"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Chandrasekaran</surname> <given-names>C</given-names></name><name><surname>Bray</surname> <given-names>IE</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Frequency shifts and depth dependence of premotor beta band activity during perceptual Decision-Making</article-title><source>The Journal of Neuroscience</source><volume>39</volume><fpage>1420</fpage><lpage>1435</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.1066-18.2018</pub-id><pub-id pub-id-type="pmid">30606756</pub-id></element-citation></ref><ref id="bib27"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>T</given-names></name><name><surname>Guestrin</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Xgboost: a scalable tree boosting system</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1603.02754">https://arxiv.org/abs/1603.02754</ext-link></element-citation></ref><ref id="bib28"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Churchland</surname> <given-names>MM</given-names></name><name><surname>Cunningham</surname> <given-names>JP</given-names></name><name><surname>Kaufman</surname> <given-names>MT</given-names></name><name><surname>Foster</surname> <given-names>JD</given-names></name><name><surname>Nuyujukian</surname> <given-names>P</given-names></name><name><surname>Ryu</surname> <given-names>SI</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Neural population dynamics during 
reaching</article-title><source>Nature</source><volume>487</volume><fpage>51</fpage><lpage>56</lpage><pub-id pub-id-type="doi">10.1038/nature11129</pub-id><pub-id pub-id-type="pmid">22722855</pub-id></element-citation></ref><ref id="bib29"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cisek</surname> <given-names>P</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Making decisions through a distributed consensus</article-title><source>Current Opinion in Neurobiology</source><volume>22</volume><fpage>927</fpage><lpage>936</lpage><pub-id pub-id-type="doi">10.1016/j.conb.2012.05.007</pub-id><pub-id pub-id-type="pmid">22683275</pub-id></element-citation></ref><ref id="bib30"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cohen</surname> <given-names>JY</given-names></name><name><surname>Haesler</surname> <given-names>S</given-names></name><name><surname>Vong</surname> <given-names>L</given-names></name><name><surname>Lowell</surname> <given-names>BB</given-names></name><name><surname>Uchida</surname> <given-names>N</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Neuron-type-specific signals for reward and punishment in the ventral tegmental area</article-title><source>Nature</source><volume>482</volume><fpage>85</fpage><lpage>88</lpage><pub-id pub-id-type="doi">10.1038/nature10754</pub-id><pub-id pub-id-type="pmid">22258508</pub-id></element-citation></ref><ref id="bib31"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Connors</surname> <given-names>BW</given-names></name><name><surname>Gutnick</surname> <given-names>MJ</given-names></name><name><surname>Prince</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="1982">1982</year><article-title>Electrophysiological properties of neocortical neurons in 
vitro</article-title><source>Journal of Neurophysiology</source><volume>48</volume><fpage>1302</fpage><lpage>1320</lpage><pub-id pub-id-type="doi">10.1152/jn.1982.48.6.1302</pub-id><pub-id pub-id-type="pmid">6296328</pub-id></element-citation></ref><ref id="bib32"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Connors</surname> <given-names>BW</given-names></name><name><surname>Gutnick</surname> <given-names>MJ</given-names></name></person-group><year iso-8601-date="1990">1990</year><article-title>Intrinsic firing patterns of diverse neocortical neurons</article-title><source>Trends in Neurosciences</source><volume>13</volume><fpage>99</fpage><lpage>104</lpage><pub-id pub-id-type="doi">10.1016/0166-2236(90)90185-D</pub-id><pub-id pub-id-type="pmid">1691879</pub-id></element-citation></ref><ref id="bib33"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Constantinople</surname> <given-names>CM</given-names></name><name><surname>Disney</surname> <given-names>AA</given-names></name><name><surname>Maffie</surname> <given-names>J</given-names></name><name><surname>Rudy</surname> <given-names>B</given-names></name><name><surname>Hawken</surname> <given-names>MJ</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Quantitative analysis of neurons with Kv3 potassium channel subunits, Kv3.1b and Kv3.2, in macaque primary visual cortex</article-title><source>The Journal of Comparative Neurology</source><volume>516</volume><fpage>291</fpage><lpage>311</lpage><pub-id pub-id-type="doi">10.1002/cne.22111</pub-id></element-citation></ref><ref id="bib34"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Contreras</surname> <given-names>D</given-names></name></person-group><year iso-8601-date="2004">2004</year><article-title>Electrophysiological classes of neocortical 
neurons</article-title><source>Neural Networks</source><volume>17</volume><fpage>633</fpage><lpage>646</lpage><pub-id pub-id-type="doi">10.1016/j.neunet.2004.04.003</pub-id><pub-id pub-id-type="pmid">15288889</pub-id></element-citation></ref><ref id="bib35"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Courtin</surname> <given-names>J</given-names></name><name><surname>Chaudun</surname> <given-names>F</given-names></name><name><surname>Rozeske</surname> <given-names>RR</given-names></name><name><surname>Karalis</surname> <given-names>N</given-names></name><name><surname>Gonzalez-Campo</surname> <given-names>C</given-names></name><name><surname>Wurtz</surname> <given-names>H</given-names></name><name><surname>Abdi</surname> <given-names>A</given-names></name><name><surname>Baufreton</surname> <given-names>J</given-names></name><name><surname>Bienvenu</surname> <given-names>TC</given-names></name><name><surname>Herry</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Prefrontal parvalbumin interneurons shape neuronal activity to drive fear expression</article-title><source>Nature</source><volume>505</volume><fpage>92</fpage><lpage>96</lpage><pub-id pub-id-type="doi">10.1038/nature12755</pub-id><pub-id pub-id-type="pmid">24256726</pub-id></element-citation></ref><ref id="bib36"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Csardi</surname> <given-names>G</given-names></name><name><surname>Nepusz</surname> <given-names>T</given-names></name></person-group><year iso-8601-date="2006">2006</year><article-title>The igraph software package for complex network research</article-title><source>InterJournal, Complex Systems</source><volume>1695</volume><fpage>1</fpage><lpage>9</lpage></element-citation></ref><ref id="bib37"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Cunningham</surname> <given-names>JP</given-names></name><name><surname>Yu</surname> <given-names>BM</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Dimensionality reduction for large-scale neural recordings</article-title><source>Nature Neuroscience</source><volume>17</volume><fpage>1500</fpage><lpage>1509</lpage><pub-id pub-id-type="doi">10.1038/nn.3776</pub-id><pub-id pub-id-type="pmid">25151264</pub-id></element-citation></ref><ref id="bib38"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>De</surname> <given-names>A</given-names></name><name><surname>El-Shamayleh</surname> <given-names>Y</given-names></name><name><surname>Horwitz</surname> <given-names>GD</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Fast and reversible neural inactivation in macaque cortex by optogenetic stimulation of GABAergic neurons</article-title><source>eLife</source><volume>9</volume><elocation-id>e52658</elocation-id><pub-id pub-id-type="doi">10.7554/eLife.52658</pub-id><pub-id pub-id-type="pmid">32452766</pub-id></element-citation></ref><ref id="bib39"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>DeFelipe</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="1997">1997</year><article-title>Types of neurons, synaptic connections and chemical characteristics of cells immunoreactive for calbindin-D28K, parvalbumin and calretinin in the neocortex</article-title><source>Journal of Chemical Neuroanatomy</source><volume>14</volume><fpage>1</fpage><lpage>19</lpage><pub-id pub-id-type="doi">10.1016/S0891-0618(97)10013-8</pub-id><pub-id pub-id-type="pmid">9498163</pub-id></element-citation></ref><ref id="bib40"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Deligkaris</surname> 
<given-names>K</given-names></name><name><surname>Bullmann</surname> <given-names>T</given-names></name><name><surname>Frey</surname> <given-names>U</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Extracellularly recorded somatic and neuritic signal shapes and classification algorithms for High-Density microelectrode array electrophysiology</article-title><source>Frontiers in Neuroscience</source><volume>10</volume><elocation-id>421</elocation-id><pub-id pub-id-type="doi">10.3389/fnins.2016.00421</pub-id><pub-id pub-id-type="pmid">27683541</pub-id></element-citation></ref><ref id="bib41"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Deubner</surname> <given-names>J</given-names></name><name><surname>Coulon</surname> <given-names>P</given-names></name><name><surname>Diester</surname> <given-names>I</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Optogenetic approaches to study the mammalian brain</article-title><source>Current Opinion in Structural Biology</source><volume>57</volume><fpage>157</fpage><lpage>163</lpage><pub-id pub-id-type="doi">10.1016/j.sbi.2019.04.003</pub-id><pub-id pub-id-type="pmid">31082625</pub-id></element-citation></ref><ref id="bib42"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Diaz-Papkovich</surname> <given-names>A</given-names></name><name><surname>Anderson-Trocm&#233;</surname> <given-names>L</given-names></name><name><surname>Ben-Eghan</surname> <given-names>C</given-names></name><name><surname>Gravel</surname> <given-names>S</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>UMAP reveals cryptic population structure and phenotype heterogeneity in large genomic cohorts</article-title><source>PLOS Genetics</source><volume>15</volume><elocation-id>e1008432</elocation-id><pub-id 
pub-id-type="doi">10.1371/journal.pgen.1008432</pub-id><pub-id pub-id-type="pmid">31675358</pub-id></element-citation></ref><ref id="bib43"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dimitriadis</surname> <given-names>G</given-names></name><name><surname>Neto</surname> <given-names>JP</given-names></name><name><surname>Kampff</surname> <given-names>AR</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>t-SNE visualization of Large-Scale neural recordings</article-title><source>Neural Computation</source><volume>30</volume><fpage>1750</fpage><lpage>1774</lpage><pub-id pub-id-type="doi">10.1162/neco_a_01097</pub-id><pub-id pub-id-type="pmid">29894653</pub-id></element-citation></ref><ref id="bib44"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Dimitriadis</surname> <given-names>G</given-names></name><name><surname>Neto</surname> <given-names>JP</given-names></name><name><surname>Aarts</surname> <given-names>A</given-names></name><name><surname>Alexandru</surname> <given-names>A</given-names></name><name><surname>Ballini</surname> <given-names>M</given-names></name><name><surname>Battaglia</surname> <given-names>F</given-names></name><name><surname>Calcaterra</surname> <given-names>L</given-names></name><name><surname>Chen</surname> <given-names>S</given-names></name><name><surname>David</surname> <given-names>F</given-names></name><name><surname>Fi&#225;th</surname> <given-names>R</given-names></name><name><surname>Fraz&#227;o</surname> <given-names>J</given-names></name><name><surname>Geerts</surname> <given-names>JP</given-names></name><name><surname>Gentet</surname> <given-names>LJ</given-names></name><name><surname>Helleputte</surname> <given-names>NV</given-names></name><name><surname>Holzhammer</surname> <given-names>T</given-names></name><name><surname>Hoof</surname> 
<given-names>C</given-names></name><name><surname>Horv&#225;th</surname> <given-names>D</given-names></name><name><surname>Lopes</surname> <given-names>G</given-names></name><name><surname>Lopez</surname> <given-names>CM</given-names></name><name><surname>Maris</surname> <given-names>E</given-names></name><name><surname>Marques-Smith</surname> <given-names>A</given-names></name><name><surname>M&#225;rton</surname> <given-names>G</given-names></name><name><surname>McNaughton</surname> <given-names>BL</given-names></name><name><surname>Mesz&#233;na</surname> <given-names>D</given-names></name><name><surname>Mitra</surname> <given-names>S</given-names></name><name><surname>Musa</surname> <given-names>S</given-names></name><name><surname>Neves</surname> <given-names>H</given-names></name><name><surname>Nogueira</surname> <given-names>J</given-names></name><name><surname>Orban</surname> <given-names>GA</given-names></name><name><surname>Pothof</surname> <given-names>F</given-names></name><name><surname>Putzeys</surname> <given-names>J</given-names></name><name><surname>Raducanu</surname> <given-names>BC</given-names></name><name><surname>Ruther</surname> <given-names>P</given-names></name><name><surname>Schroeder</surname> <given-names>T</given-names></name><name><surname>Singer</surname> <given-names>W</given-names></name><name><surname>Steinmetz</surname> <given-names>NA</given-names></name><name><surname>Tiesinga</surname> <given-names>P</given-names></name><name><surname>Ulbert</surname> <given-names>I</given-names></name><name><surname>Wang</surname> <given-names>S</given-names></name><name><surname>Welkenhuysen</surname> <given-names>M</given-names></name><name><surname>Kampff</surname> <given-names>AR</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Why not record from every electrode with a CMOS scanning probe?&#160;</article-title><source>bioRxiv</source><pub-id 
pub-id-type="doi">10.1101/275818</pub-id></element-citation></ref><ref id="bib45"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>S</given-names></name><name><surname>Matta</surname> <given-names>SG</given-names></name><name><surname>Zhou</surname> <given-names>FM</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Kv3-like potassium channels are required for sustained high-frequency firing in basal ganglia output neurons</article-title><source>Journal of Neurophysiology</source><volume>105</volume><fpage>554</fpage><lpage>570</lpage><pub-id pub-id-type="doi">10.1152/jn.00707.2010</pub-id><pub-id pub-id-type="pmid">21160004</pub-id></element-citation></ref><ref id="bib46"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>L</given-names></name><name><surname>Gold</surname> <given-names>JI</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Neural correlates of perceptual decision making before, during, and after decision commitment in monkey frontal eye field</article-title><source>Cerebral Cortex</source><volume>22</volume><fpage>1052</fpage><lpage>1067</lpage><pub-id pub-id-type="doi">10.1093/cercor/bhr178</pub-id></element-citation></ref><ref id="bib47"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dolensek</surname> <given-names>N</given-names></name><name><surname>Gehrlach</surname> <given-names>DA</given-names></name><name><surname>Klein</surname> <given-names>AS</given-names></name><name><surname>Gogolla</surname> <given-names>N</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Facial expressions of emotion states and their neuronal correlates in 
mice</article-title><source>Science</source><volume>368</volume><fpage>89</fpage><lpage>94</lpage><pub-id pub-id-type="doi">10.1126/science.aaz9468</pub-id><pub-id pub-id-type="pmid">32241948</pub-id></element-citation></ref><ref id="bib48"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dombrowski</surname> <given-names>SM</given-names></name><name><surname>Hilgetag</surname> <given-names>CC</given-names></name><name><surname>Barbas</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2001">2001</year><article-title>Quantitative architecture distinguishes prefrontal cortical systems in the rhesus monkey</article-title><source>Cerebral Cortex</source><volume>11</volume><fpage>975</fpage><lpage>988</lpage><pub-id pub-id-type="doi">10.1093/cercor/11.10.975</pub-id><pub-id pub-id-type="pmid">11549620</pub-id></element-citation></ref><ref id="bib49"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Dong</surname> <given-names>W</given-names></name><name><surname>Moses</surname> <given-names>C</given-names></name><name><surname>Li</surname> <given-names>K</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Efficient K-Nearest neighbor graph construction for generic similarity measures</article-title><conf-name>Proceedings of the 20th International Conference on World Wide Web</conf-name><fpage>577</fpage><lpage>586</lpage><pub-id pub-id-type="doi">10.1145/1963405.1963487</pub-id></element-citation></ref><ref id="bib50"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Erisir</surname> <given-names>A</given-names></name><name><surname>Lau</surname> <given-names>D</given-names></name><name><surname>Rudy</surname> <given-names>B</given-names></name><name><surname>Leonard</surname> <given-names>CS</given-names></name></person-group><year 
iso-8601-date="1999">1999</year><article-title>Function of specific K(+) channels in sustained high-frequency firing of fast-spiking neocortical interneurons</article-title><source>Journal of Neurophysiology</source><volume>82</volume><fpage>2476</fpage><lpage>2489</lpage><pub-id pub-id-type="doi">10.1152/jn.1999.82.5.2476</pub-id><pub-id pub-id-type="pmid">10561420</pub-id></element-citation></ref><ref id="bib51"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Estebanez</surname> <given-names>L</given-names></name><name><surname>Hoffmann</surname> <given-names>D</given-names></name><name><surname>Voigt</surname> <given-names>BC</given-names></name><name><surname>Poulet</surname> <given-names>JFA</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Parvalbumin-Expressing GABAergic neurons in primary motor cortex signal reaching</article-title><source>Cell Reports</source><volume>20</volume><fpage>308</fpage><lpage>318</lpage><pub-id pub-id-type="doi">10.1016/j.celrep.2017.06.044</pub-id><pub-id pub-id-type="pmid">28700934</pub-id></element-citation></ref><ref id="bib52"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Ester</surname> <given-names>M</given-names></name><name><surname>Kriegel</surname> <given-names>H-P</given-names></name><name><surname>Sander</surname> <given-names>J</given-names></name><name><surname>Xu</surname> <given-names>X</given-names></name></person-group><year iso-8601-date="1996">1996</year><article-title>A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise</article-title><conf-name>Proceedings of the Second International Conference on Knowledge Discovery and Data Mining, KDD&#8217;96</conf-name><fpage>226</fpage><lpage>231</lpage></element-citation></ref><ref id="bib53"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Estrada</surname> <given-names>LI</given-names></name><name><surname>Robinson</surname> <given-names>AA</given-names></name><name><surname>Amaral</surname> <given-names>AC</given-names></name><name><surname>Giannaris</surname> <given-names>EL</given-names></name><name><surname>Heyworth</surname> <given-names>NC</given-names></name><name><surname>Mortazavi</surname> <given-names>F</given-names></name><name><surname>Ngwenya</surname> <given-names>LB</given-names></name><name><surname>Roberts</surname> <given-names>DE</given-names></name><name><surname>Cabral</surname> <given-names>HJ</given-names></name><name><surname>Killiany</surname> <given-names>RJ</given-names></name><name><surname>Rosene</surname> <given-names>DL</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Evaluation of Long-Term cryostorage of brain tissue sections for quantitative histochemistry</article-title><source>Journal of Histochemistry &amp; Cytochemistry</source><volume>65</volume><fpage>153</fpage><lpage>171</lpage><pub-id pub-id-type="doi">10.1369/0022155416686934</pub-id></element-citation></ref><ref id="bib54"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Finn</surname> <given-names>ES</given-names></name><name><surname>Huber</surname> <given-names>L</given-names></name><name><surname>Jangraw</surname> <given-names>DC</given-names></name><name><surname>Molfese</surname> <given-names>PJ</given-names></name><name><surname>Bandettini</surname> <given-names>PA</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Layer-dependent activity in human prefrontal cortex during working memory</article-title><source>Nature Neuroscience</source><volume>22</volume><fpage>1687</fpage><lpage>1695</lpage><pub-id pub-id-type="doi">10.1038/s41593-019-0487-z</pub-id><pub-id pub-id-type="pmid">31551596</pub-id></element-citation></ref><ref 
id="bib55"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Georgopoulos</surname> <given-names>AP</given-names></name><name><surname>Schwartz</surname> <given-names>AB</given-names></name><name><surname>Kettner</surname> <given-names>RE</given-names></name></person-group><year iso-8601-date="1986">1986</year><article-title>Neuronal population coding of movement direction</article-title><source>Science</source><volume>233</volume><fpage>1416</fpage><lpage>1419</lpage><pub-id pub-id-type="doi">10.1126/science.3749885</pub-id><pub-id pub-id-type="pmid">3749885</pub-id></element-citation></ref><ref id="bib56"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>C</given-names></name><name><surname>Henze</surname> <given-names>DA</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Buzs&#225;ki</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2006">2006</year><article-title>On the origin of the extracellular action potential waveform: a modeling study</article-title><source>Journal of Neurophysiology</source><volume>95</volume><fpage>3113</fpage><lpage>3128</lpage><pub-id pub-id-type="doi">10.1152/jn.00979.2005</pub-id><pub-id pub-id-type="pmid">16467426</pub-id></element-citation></ref><ref id="bib57"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>C</given-names></name><name><surname>Girardin</surname> <given-names>CC</given-names></name><name><surname>Martin</surname> <given-names>KA</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>High-amplitude positive spikes recorded extracellularly in cat visual cortex</article-title><source>Journal of 
Neurophysiology</source><volume>102</volume><fpage>3340</fpage><lpage>3351</lpage><pub-id pub-id-type="doi">10.1152/jn.91365.2008</pub-id><pub-id pub-id-type="pmid">19793873</pub-id></element-citation></ref><ref id="bib58"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>JI</given-names></name><name><surname>Shadlen</surname> <given-names>MN</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>The neural basis of decision making</article-title><source>Annual Review of Neuroscience</source><volume>30</volume><fpage>535</fpage><lpage>574</lpage><pub-id pub-id-type="doi">10.1146/annurev.neuro.29.051605.113038</pub-id><pub-id pub-id-type="pmid">17600525</pub-id></element-citation></ref><ref id="bib59"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gouwens</surname> <given-names>NW</given-names></name><name><surname>Berg</surname> <given-names>J</given-names></name><name><surname>Feng</surname> <given-names>D</given-names></name><name><surname>Sorensen</surname> <given-names>SA</given-names></name><name><surname>Zeng</surname> <given-names>H</given-names></name><name><surname>Hawrylycz</surname> <given-names>MJ</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Arkhipov</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Systematic generation of biophysically detailed models for diverse cortical neuron types</article-title><source>Nature Communications</source><volume>9</volume><elocation-id>710</elocation-id><pub-id pub-id-type="doi">10.1038/s41467-017-02718-3</pub-id><pub-id pub-id-type="pmid">29459718</pub-id></element-citation></ref><ref id="bib60"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gouwens</surname> 
<given-names>NW</given-names></name><name><surname>Sorensen</surname> <given-names>SA</given-names></name><name><surname>Baftizadeh</surname> <given-names>F</given-names></name><name><surname>Budzillo</surname> <given-names>A</given-names></name><name><surname>Lee</surname> <given-names>BR</given-names></name><name><surname>Jarsky</surname> <given-names>T</given-names></name><name><surname>Alfiler</surname> <given-names>L</given-names></name><name><surname>Baker</surname> <given-names>K</given-names></name><name><surname>Barkan</surname> <given-names>E</given-names></name><name><surname>Berry</surname> <given-names>K</given-names></name><name><surname>Bertagnolli</surname> <given-names>D</given-names></name><name><surname>Bickley</surname> <given-names>K</given-names></name><name><surname>Bomben</surname> <given-names>J</given-names></name><name><surname>Braun</surname> <given-names>T</given-names></name><name><surname>Brouner</surname> <given-names>K</given-names></name><name><surname>Casper</surname> <given-names>T</given-names></name><name><surname>Crichton</surname> <given-names>K</given-names></name><name><surname>Daigle</surname> <given-names>TL</given-names></name><name><surname>Dalley</surname> <given-names>R</given-names></name><name><surname>de Frates</surname> <given-names>RA</given-names></name><name><surname>Dee</surname> <given-names>N</given-names></name><name><surname>Desta</surname> <given-names>T</given-names></name><name><surname>Lee</surname> <given-names>SD</given-names></name><name><surname>Dotson</surname> <given-names>N</given-names></name><name><surname>Egdorf</surname> <given-names>T</given-names></name><name><surname>Ellingwood</surname> <given-names>L</given-names></name><name><surname>Enstrom</surname> <given-names>R</given-names></name><name><surname>Esposito</surname> <given-names>L</given-names></name><name><surname>Farrell</surname> <given-names>C</given-names></name><name><surname>Feng</surname> 
<given-names>D</given-names></name><name><surname>Fong</surname> <given-names>O</given-names></name><name><surname>Gala</surname> <given-names>R</given-names></name><name><surname>Gamlin</surname> <given-names>C</given-names></name><name><surname>Gary</surname> <given-names>A</given-names></name><name><surname>Glandon</surname> <given-names>A</given-names></name><name><surname>Goldy</surname> <given-names>J</given-names></name><name><surname>Gorham</surname> <given-names>M</given-names></name><name><surname>Graybuck</surname> <given-names>L</given-names></name><name><surname>Gu</surname> <given-names>H</given-names></name><name><surname>Hadley</surname> <given-names>K</given-names></name><name><surname>Hawrylycz</surname> <given-names>MJ</given-names></name><name><surname>Henry</surname> <given-names>AM</given-names></name><name><surname>Hill</surname> <given-names>D</given-names></name><name><surname>Hupp</surname> <given-names>M</given-names></name><name><surname>Kebede</surname> <given-names>S</given-names></name><name><surname>Kim</surname> <given-names>TK</given-names></name><name><surname>Kim</surname> <given-names>L</given-names></name><name><surname>Kroll</surname> <given-names>M</given-names></name><name><surname>Lee</surname> <given-names>C</given-names></name><name><surname>Link</surname> <given-names>KE</given-names></name><name><surname>Mallory</surname> <given-names>M</given-names></name><name><surname>Mann</surname> <given-names>R</given-names></name><name><surname>Maxwell</surname> <given-names>M</given-names></name><name><surname>McGraw</surname> <given-names>M</given-names></name><name><surname>McMillen</surname> <given-names>D</given-names></name><name><surname>Mukora</surname> <given-names>A</given-names></name><name><surname>Ng</surname> <given-names>L</given-names></name><name><surname>Ng</surname> <given-names>L</given-names></name><name><surname>Ngo</surname> <given-names>K</given-names></name><name><surname>Nicovich</surname> 
<given-names>PR</given-names></name><name><surname>Oldre</surname> <given-names>A</given-names></name><name><surname>Park</surname> <given-names>D</given-names></name><name><surname>Peng</surname> <given-names>H</given-names></name><name><surname>Penn</surname> <given-names>O</given-names></name><name><surname>Pham</surname> <given-names>T</given-names></name><name><surname>Pom</surname> <given-names>A</given-names></name><name><surname>Popovi&#263;</surname> <given-names>Z</given-names></name><name><surname>Potekhina</surname> <given-names>L</given-names></name><name><surname>Rajanbabu</surname> <given-names>R</given-names></name><name><surname>Ransford</surname> <given-names>S</given-names></name><name><surname>Reid</surname> <given-names>D</given-names></name><name><surname>Rimorin</surname> <given-names>C</given-names></name><name><surname>Robertson</surname> <given-names>M</given-names></name><name><surname>Ronellenfitch</surname> <given-names>K</given-names></name><name><surname>Ruiz</surname> <given-names>A</given-names></name><name><surname>Sandman</surname> <given-names>D</given-names></name><name><surname>Smith</surname> <given-names>K</given-names></name><name><surname>Sulc</surname> <given-names>J</given-names></name><name><surname>Sunkin</surname> <given-names>SM</given-names></name><name><surname>Szafer</surname> <given-names>A</given-names></name><name><surname>Tieu</surname> <given-names>M</given-names></name><name><surname>Torkelson</surname> <given-names>A</given-names></name><name><surname>Trinh</surname> <given-names>J</given-names></name><name><surname>Tung</surname> <given-names>H</given-names></name><name><surname>Wakeman</surname> <given-names>W</given-names></name><name><surname>Ward</surname> <given-names>K</given-names></name><name><surname>Williams</surname> <given-names>G</given-names></name><name><surname>Zhou</surname> <given-names>Z</given-names></name><name><surname>Ting</surname> 
<given-names>JT</given-names></name><name><surname>Arkhipov</surname> <given-names>A</given-names></name><name><surname>S&#252;mb&#252;l</surname> <given-names>U</given-names></name><name><surname>Lein</surname> <given-names>ES</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Yao</surname> <given-names>Z</given-names></name><name><surname>Tasic</surname> <given-names>B</given-names></name><name><surname>Berg</surname> <given-names>J</given-names></name><name><surname>Murphy</surname> <given-names>GJ</given-names></name><name><surname>Zeng</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Integrated morphoelectric and transcriptomic classification of cortical GABAergic cells</article-title><source>Cell</source><volume>183</volume><fpage>935</fpage><lpage>953</lpage><pub-id pub-id-type="doi">10.1016/j.cell.2020.09.057</pub-id><pub-id pub-id-type="pmid">33186530</pub-id></element-citation></ref><ref id="bib61"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gur</surname> <given-names>M</given-names></name><name><surname>Beylin</surname> <given-names>A</given-names></name><name><surname>Snodderly</surname> <given-names>DM</given-names></name></person-group><year iso-8601-date="1999">1999</year><article-title>Physiological properties of macaque V1 neurons are correlated with extracellular spike amplitude, duration, and polarity</article-title><source>Journal of Neurophysiology</source><volume>82</volume><fpage>1451</fpage><lpage>1464</lpage><pub-id pub-id-type="doi">10.1152/jn.1999.82.3.1451</pub-id><pub-id pub-id-type="pmid">10482761</pub-id></element-citation></ref><ref id="bib62"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hangya</surname> <given-names>B</given-names></name><name><surname>Ranade</surname> 
<given-names>SP</given-names></name><name><surname>Lorenc</surname> <given-names>M</given-names></name><name><surname>Kepecs</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Central cholinergic neurons are rapidly recruited by reinforcement feedback</article-title><source>Cell</source><volume>162</volume><fpage>1155</fpage><lpage>1168</lpage><pub-id pub-id-type="doi">10.1016/j.cell.2015.07.057</pub-id><pub-id pub-id-type="pmid">26317475</pub-id></element-citation></ref><ref id="bib63"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hanks</surname> <given-names>TD</given-names></name><name><surname>Kopec</surname> <given-names>CD</given-names></name><name><surname>Brunton</surname> <given-names>BW</given-names></name><name><surname>Duan</surname> <given-names>CA</given-names></name><name><surname>Erlich</surname> <given-names>JC</given-names></name><name><surname>Brody</surname> <given-names>CD</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Distinct relationships of parietal and prefrontal cortices to evidence accumulation</article-title><source>Nature</source><volume>520</volume><fpage>220</fpage><lpage>223</lpage><pub-id pub-id-type="doi">10.1038/nature14066</pub-id><pub-id pub-id-type="pmid">25600270</pub-id></element-citation></ref><ref id="bib64"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Harris</surname> <given-names>KD</given-names></name><name><surname>Henze</surname> <given-names>DA</given-names></name><name><surname>Csicsvari</surname> <given-names>J</given-names></name><name><surname>Hirase</surname> <given-names>H</given-names></name><name><surname>Buzs&#225;ki</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2000">2000</year><article-title>Accuracy of tetrode spike separation as determined by simultaneous 
intracellular and extracellular measurements</article-title><source>Journal of Neurophysiology</source><volume>84</volume><fpage>401</fpage><lpage>414</lpage><pub-id pub-id-type="doi">10.1152/jn.2000.84.1.401</pub-id><pub-id pub-id-type="pmid">10899214</pub-id></element-citation></ref><ref id="bib65"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Henze</surname> <given-names>DA</given-names></name><name><surname>Borhegyi</surname> <given-names>Z</given-names></name><name><surname>Csicsvari</surname> <given-names>J</given-names></name><name><surname>Mamiya</surname> <given-names>A</given-names></name><name><surname>Harris</surname> <given-names>KD</given-names></name><name><surname>Buzs&#225;ki</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2000">2000</year><article-title>Intracellular features predicted by extracellular recordings in the hippocampus in vivo</article-title><source>Journal of Neurophysiology</source><volume>84</volume><fpage>390</fpage><lpage>400</lpage><pub-id pub-id-type="doi">10.1152/jn.2000.84.1.390</pub-id><pub-id pub-id-type="pmid">10899213</pub-id></element-citation></ref><ref id="bib66"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hodge</surname> <given-names>RD</given-names></name><name><surname>Bakken</surname> <given-names>TE</given-names></name><name><surname>Miller</surname> <given-names>JA</given-names></name><name><surname>Smith</surname> <given-names>KA</given-names></name><name><surname>Barkan</surname> <given-names>ER</given-names></name><name><surname>Graybuck</surname> <given-names>LT</given-names></name><name><surname>Close</surname> <given-names>JL</given-names></name><name><surname>Long</surname> <given-names>B</given-names></name><name><surname>Johansen</surname> <given-names>N</given-names></name><name><surname>Penn</surname> 
<given-names>O</given-names></name><name><surname>Yao</surname> <given-names>Z</given-names></name><name><surname>Eggermont</surname> <given-names>J</given-names></name><name><surname>H&#246;llt</surname> <given-names>T</given-names></name><name><surname>Levi</surname> <given-names>BP</given-names></name><name><surname>Shehata</surname> <given-names>SI</given-names></name><name><surname>Aevermann</surname> <given-names>B</given-names></name><name><surname>Beller</surname> <given-names>A</given-names></name><name><surname>Bertagnolli</surname> <given-names>D</given-names></name><name><surname>Brouner</surname> <given-names>K</given-names></name><name><surname>Casper</surname> <given-names>T</given-names></name><name><surname>Cobbs</surname> <given-names>C</given-names></name><name><surname>Dalley</surname> <given-names>R</given-names></name><name><surname>Dee</surname> <given-names>N</given-names></name><name><surname>Ding</surname> <given-names>SL</given-names></name><name><surname>Ellenbogen</surname> <given-names>RG</given-names></name><name><surname>Fong</surname> <given-names>O</given-names></name><name><surname>Garren</surname> <given-names>E</given-names></name><name><surname>Goldy</surname> <given-names>J</given-names></name><name><surname>Gwinn</surname> <given-names>RP</given-names></name><name><surname>Hirschstein</surname> <given-names>D</given-names></name><name><surname>Keene</surname> <given-names>CD</given-names></name><name><surname>Keshk</surname> <given-names>M</given-names></name><name><surname>Ko</surname> <given-names>AL</given-names></name><name><surname>Lathia</surname> <given-names>K</given-names></name><name><surname>Mahfouz</surname> <given-names>A</given-names></name><name><surname>Maltzer</surname> <given-names>Z</given-names></name><name><surname>McGraw</surname> <given-names>M</given-names></name><name><surname>Nguyen</surname> <given-names>TN</given-names></name><name><surname>Nyhus</surname> 
<given-names>J</given-names></name><name><surname>Ojemann</surname> <given-names>JG</given-names></name><name><surname>Oldre</surname> <given-names>A</given-names></name><name><surname>Parry</surname> <given-names>S</given-names></name><name><surname>Reynolds</surname> <given-names>S</given-names></name><name><surname>Rimorin</surname> <given-names>C</given-names></name><name><surname>Shapovalova</surname> <given-names>NV</given-names></name><name><surname>Somasundaram</surname> <given-names>S</given-names></name><name><surname>Szafer</surname> <given-names>A</given-names></name><name><surname>Thomsen</surname> <given-names>ER</given-names></name><name><surname>Tieu</surname> <given-names>M</given-names></name><name><surname>Quon</surname> <given-names>G</given-names></name><name><surname>Scheuermann</surname> <given-names>RH</given-names></name><name><surname>Yuste</surname> <given-names>R</given-names></name><name><surname>Sunkin</surname> <given-names>SM</given-names></name><name><surname>Lelieveldt</surname> <given-names>B</given-names></name><name><surname>Feng</surname> <given-names>D</given-names></name><name><surname>Ng</surname> <given-names>L</given-names></name><name><surname>Bernard</surname> <given-names>A</given-names></name><name><surname>Hawrylycz</surname> <given-names>M</given-names></name><name><surname>Phillips</surname> <given-names>JW</given-names></name><name><surname>Tasic</surname> <given-names>B</given-names></name><name><surname>Zeng</surname> <given-names>H</given-names></name><name><surname>Jones</surname> <given-names>AR</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Lein</surname> <given-names>ES</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Conserved cell types with divergent features in human versus mouse cortex</article-title><source>Nature</source><volume>573</volume><fpage>61</fpage><lpage>68</lpage><pub-id 
pub-id-type="doi">10.1038/s41586-019-1506-7</pub-id><pub-id pub-id-type="pmid">31435019</pub-id></element-citation></ref><ref id="bib67"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Hsu</surname> <given-names>AI</given-names></name><name><surname>Yttri</surname> <given-names>EA</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>B-soid: an open source unsupervised algorithm for discovery of spontaneous behaviors</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/770271</pub-id></element-citation></ref><ref id="bib68"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hubel</surname> <given-names>DH</given-names></name><name><surname>Wiesel</surname> <given-names>TN</given-names></name></person-group><year iso-8601-date="1959">1959</year><article-title>Receptive fields of single neurones in the cat's striate cortex</article-title><source>The Journal of Physiology</source><volume>148</volume><fpage>574</fpage><lpage>591</lpage><pub-id pub-id-type="doi">10.1113/jphysiol.1959.sp006308</pub-id><pub-id pub-id-type="pmid">14403679</pub-id></element-citation></ref><ref id="bib69"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hussar</surname> <given-names>CR</given-names></name><name><surname>Pasternak</surname> <given-names>T</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Flexibility of sensory representations in prefrontal cortex depends on cell type</article-title><source>Neuron</source><volume>64</volume><fpage>730</fpage><lpage>743</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2009.11.018</pub-id><pub-id pub-id-type="pmid">20005828</pub-id></element-citation></ref><ref id="bib70"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ichinohe</surname> 
<given-names>N</given-names></name><name><surname>Watakabe</surname> <given-names>A</given-names></name><name><surname>Miyashita</surname> <given-names>T</given-names></name><name><surname>Yamamori</surname> <given-names>T</given-names></name><name><surname>Hashikawa</surname> <given-names>T</given-names></name><name><surname>Rockland</surname> <given-names>KS</given-names></name></person-group><year iso-8601-date="2004">2004</year><article-title>A voltage-gated potassium channel, Kv3.1b, is expressed by a subpopulation of large pyramidal neurons in layer 5 of the macaque monkey cortex</article-title><source>Neuroscience</source><volume>129</volume><fpage>179</fpage><lpage>185</lpage><pub-id pub-id-type="doi">10.1016/j.neuroscience.2004.08.005</pub-id><pub-id pub-id-type="pmid">15489040</pub-id></element-citation></ref><ref id="bib71"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jia</surname> <given-names>X</given-names></name><name><surname>Siegle</surname> <given-names>JH</given-names></name><name><surname>Bennett</surname> <given-names>C</given-names></name><name><surname>Gale</surname> <given-names>SD</given-names></name><name><surname>Denman</surname> <given-names>DJ</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Olsen</surname> <given-names>SR</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>High-density extracellular probes reveal dendritic backpropagation and facilitate neuron classification</article-title><source>Journal of Neurophysiology</source><volume>121</volume><fpage>1831</fpage><lpage>1847</lpage><pub-id pub-id-type="doi">10.1152/jn.00680.2018</pub-id><pub-id pub-id-type="pmid">30840526</pub-id></element-citation></ref><ref id="bib72"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johnston</surname> 
<given-names>K</given-names></name><name><surname>DeSouza</surname> <given-names>JF</given-names></name><name><surname>Everling</surname> <given-names>S</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Monkey prefrontal cortical pyramidal and putative interneurons exhibit differential patterns of activity between prosaccade and antisaccade tasks</article-title><source>Journal of Neuroscience</source><volume>29</volume><fpage>5516</fpage><lpage>5524</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.5953-08.2009</pub-id><pub-id pub-id-type="pmid">19403819</pub-id></element-citation></ref><ref id="bib73"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jun</surname> <given-names>JJ</given-names></name><name><surname>Steinmetz</surname> <given-names>NA</given-names></name><name><surname>Siegle</surname> <given-names>JH</given-names></name><name><surname>Denman</surname> <given-names>DJ</given-names></name><name><surname>Bauza</surname> <given-names>M</given-names></name><name><surname>Barbarits</surname> <given-names>B</given-names></name><name><surname>Lee</surname> <given-names>AK</given-names></name><name><surname>Anastassiou</surname> <given-names>CA</given-names></name><name><surname>Andrei</surname> <given-names>A</given-names></name><name><surname>Ayd&#305;n</surname> <given-names>&#199;</given-names></name><name><surname>Barbic</surname> <given-names>M</given-names></name><name><surname>Blanche</surname> <given-names>TJ</given-names></name><name><surname>Bonin</surname> <given-names>V</given-names></name><name><surname>Couto</surname> <given-names>J</given-names></name><name><surname>Dutta</surname> <given-names>B</given-names></name><name><surname>Gratiy</surname> <given-names>SL</given-names></name><name><surname>Gutnisky</surname> <given-names>DA</given-names></name><name><surname>H&#228;usser</surname> <given-names>M</given-names></name><name><surname>Karsh</surname> 
<given-names>B</given-names></name><name><surname>Ledochowitsch</surname> <given-names>P</given-names></name><name><surname>Lopez</surname> <given-names>CM</given-names></name><name><surname>Mitelut</surname> <given-names>C</given-names></name><name><surname>Musa</surname> <given-names>S</given-names></name><name><surname>Okun</surname> <given-names>M</given-names></name><name><surname>Pachitariu</surname> <given-names>M</given-names></name><name><surname>Putzeys</surname> <given-names>J</given-names></name><name><surname>Rich</surname> <given-names>PD</given-names></name><name><surname>Rossant</surname> <given-names>C</given-names></name><name><surname>Sun</surname> <given-names>WL</given-names></name><name><surname>Svoboda</surname> <given-names>K</given-names></name><name><surname>Carandini</surname> <given-names>M</given-names></name><name><surname>Harris</surname> <given-names>KD</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>O'Keefe</surname> <given-names>J</given-names></name><name><surname>Harris</surname> <given-names>TD</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Fully integrated silicon probes for high-density recording of neural activity</article-title><source>Nature</source><volume>551</volume><fpage>232</fpage><lpage>236</lpage><pub-id pub-id-type="doi">10.1038/nature24636</pub-id><pub-id pub-id-type="pmid">29120427</pub-id></element-citation></ref><ref id="bib74"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kaczmarek</surname> <given-names>LK</given-names></name><name><surname>Zhang</surname> <given-names>Y</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Kv3 channels: enablers of rapid firing, neurotransmitter release, and neuronal endurance</article-title><source>Physiological Reviews</source><volume>97</volume><fpage>1431</fpage><lpage>1468</lpage><pub-id 
pub-id-type="doi">10.1152/physrev.00002.2017</pub-id><pub-id pub-id-type="pmid">28904001</pub-id></element-citation></ref><ref id="bib75"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Katai</surname> <given-names>S</given-names></name><name><surname>Kato</surname> <given-names>K</given-names></name><name><surname>Unno</surname> <given-names>S</given-names></name><name><surname>Kang</surname> <given-names>Y</given-names></name><name><surname>Saruwatari</surname> <given-names>M</given-names></name><name><surname>Ishikawa</surname> <given-names>N</given-names></name><name><surname>Inoue</surname> <given-names>M</given-names></name><name><surname>Mikami</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2010">2010</year><article-title>Classification of extracellularly recorded neurons by their discharge patterns and their correlates with intracellularly identified neuronal types in the frontal cortex of behaving monkeys</article-title><source>European Journal of Neuroscience</source><volume>31</volume><fpage>1322</fpage><lpage>1338</lpage><pub-id pub-id-type="doi">10.1111/j.1460-9568.2010.07150.x</pub-id><pub-id pub-id-type="pmid">20345909</pub-id></element-citation></ref><ref id="bib76"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kaufman</surname> <given-names>MT</given-names></name><name><surname>Churchland</surname> <given-names>MM</given-names></name><name><surname>Santhanam</surname> <given-names>G</given-names></name><name><surname>Yu</surname> <given-names>BM</given-names></name><name><surname>Afshar</surname> <given-names>A</given-names></name><name><surname>Ryu</surname> <given-names>SI</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name></person-group><year iso-8601-date="2010">2010</year><article-title>Roles of monkey premotor neuron classes in movement preparation and 
execution</article-title><source>Journal of Neurophysiology</source><volume>104</volume><fpage>799</fpage><lpage>810</lpage><pub-id pub-id-type="doi">10.1152/jn.00231.2009</pub-id><pub-id pub-id-type="pmid">20538784</pub-id></element-citation></ref><ref id="bib77"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Kaufman</surname> <given-names>S</given-names></name><name><surname>Rosset</surname> <given-names>S</given-names></name><name><surname>Perlich</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Leakage in data mining: formulation, detection, and avoidance</article-title><conf-name>Proceedings of the 2011 Conference on Knowledge Discovery in Data Mining</conf-name><fpage>556</fpage><lpage>563</lpage></element-citation></ref><ref id="bib78"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kaufman</surname> <given-names>MT</given-names></name><name><surname>Churchland</surname> <given-names>MM</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>The roles of monkey M1 neuron classes in movement preparation and execution</article-title><source>Journal of Neurophysiology</source><volume>110</volume><fpage>817</fpage><lpage>825</lpage><pub-id pub-id-type="doi">10.1152/jn.00892.2011</pub-id><pub-id pub-id-type="pmid">23699057</pub-id></element-citation></ref><ref id="bib79"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname> <given-names>JG</given-names></name><name><surname>Garc&#237;a-Mar&#237;n</surname> <given-names>V</given-names></name><name><surname>Rudy</surname> <given-names>B</given-names></name><name><surname>Hawken</surname> <given-names>MJ</given-names></name></person-group><year 
iso-8601-date="2019">2019</year><article-title>Densities and laminar distributions of Kv3.1b-, PV-, GABA-, and SMI-32-immunoreactive neurons in macaque area V1</article-title><source>Cerebral Cortex</source><volume>29</volume><fpage>1921</fpage><lpage>1937</lpage><pub-id pub-id-type="doi">10.1093/cercor/bhy072</pub-id></element-citation></ref><ref id="bib80"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname> <given-names>JG</given-names></name><name><surname>Hawken</surname> <given-names>MJ</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>GABAergic and non-GABAergic subpopulations of Kv3.1b-expressing neurons in macaque V2 and MT: laminar distributions and proportion of total neuronal population</article-title><source>Brain Structure and Function</source><volume>225</volume><fpage>1135</fpage><lpage>1152</lpage><pub-id pub-id-type="doi">10.1007/s00429-020-02065-y</pub-id><pub-id pub-id-type="pmid">32266458</pub-id></element-citation></ref><ref id="bib81"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kleiner</surname> <given-names>M</given-names></name><name><surname>Brainard</surname> <given-names>D</given-names></name><name><surname>Pelli</surname> <given-names>D</given-names></name><name><surname>Ingling</surname> <given-names>A</given-names></name><name><surname>Murray</surname> <given-names>R</given-names></name><name><surname>Broussard</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>What&#8217;s new in psychtoolbox-3</article-title><source>Perception</source><volume>36</volume><fpage>1</fpage><lpage>16</lpage></element-citation></ref><ref id="bib82"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Kleinman</surname> 
<given-names>M</given-names></name><name><surname>Chandrasekaran</surname> <given-names>C</given-names></name><name><surname>Kao</surname> <given-names>JC</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Recurrent neural network models of multi-area computation underlying decision-making</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/798553</pub-id></element-citation></ref><ref id="bib83"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Klemp&#237;&#345;</surname> <given-names>O</given-names></name><name><surname>Krupi&#269;ka</surname> <given-names>R</given-names></name><name><surname>Kr&#367;&#353;ek</surname> <given-names>J</given-names></name><name><surname>Dittert</surname> <given-names>I</given-names></name><name><surname>Petr&#225;kov&#225;</surname> <given-names>V</given-names></name><name><surname>Petr&#225;k</surname> <given-names>V</given-names></name><name><surname>Taylor</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Application of spike sorting algorithm to neuronal signals originated from boron doped diamond micro-electrode arrays</article-title><source>Physiological Research</source><volume>69</volume><fpage>529</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.33549/physiolres.934366</pub-id><pub-id pub-id-type="pmid">32469239</pub-id></element-citation></ref><ref id="bib84"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kobak</surname> <given-names>D</given-names></name><name><surname>Brendel</surname> <given-names>W</given-names></name><name><surname>Constantinidis</surname> <given-names>C</given-names></name><name><surname>Feierstein</surname> <given-names>CE</given-names></name><name><surname>Kepecs</surname> <given-names>A</given-names></name><name><surname>Mainen</surname> 
<given-names>ZF</given-names></name><name><surname>Qi</surname> <given-names>XL</given-names></name><name><surname>Romo</surname> <given-names>R</given-names></name><name><surname>Uchida</surname> <given-names>N</given-names></name><name><surname>Machens</surname> <given-names>CK</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Demixed principal component analysis of neural population data</article-title><source>eLife</source><volume>5</volume><elocation-id>e10989</elocation-id><pub-id pub-id-type="doi">10.7554/eLife.10989</pub-id><pub-id pub-id-type="pmid">27067378</pub-id></element-citation></ref><ref id="bib85"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Kobak</surname> <given-names>D</given-names></name><name><surname>Linderman</surname> <given-names>GC</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>UMAP does not preserve global structure any better than t-SNE when using the same initialization</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2019.12.19.877522</pub-id></element-citation></ref><ref id="bib86"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krienen</surname> <given-names>FM</given-names></name><name><surname>Goldman</surname> <given-names>M</given-names></name><name><surname>Zhang</surname> <given-names>Q</given-names></name><name><surname>C H Del Rosario</surname> <given-names>R</given-names></name><name><surname>Florio</surname> <given-names>M</given-names></name><name><surname>Machold</surname> <given-names>R</given-names></name><name><surname>Saunders</surname> <given-names>A</given-names></name><name><surname>Levandowski</surname> <given-names>K</given-names></name><name><surname>Zaniewski</surname> <given-names>H</given-names></name><name><surname>Schuman</surname> <given-names>B</given-names></name><name><surname>Wu</surname> 
<given-names>C</given-names></name><name><surname>Lutservitz</surname> <given-names>A</given-names></name><name><surname>Mullally</surname> <given-names>CD</given-names></name><name><surname>Reed</surname> <given-names>N</given-names></name><name><surname>Bien</surname> <given-names>E</given-names></name><name><surname>Bortolin</surname> <given-names>L</given-names></name><name><surname>Fernandez-Otero</surname> <given-names>M</given-names></name><name><surname>Lin</surname> <given-names>JD</given-names></name><name><surname>Wysoker</surname> <given-names>A</given-names></name><name><surname>Nemesh</surname> <given-names>J</given-names></name><name><surname>Kulp</surname> <given-names>D</given-names></name><name><surname>Burns</surname> <given-names>M</given-names></name><name><surname>Tkachev</surname> <given-names>V</given-names></name><name><surname>Smith</surname> <given-names>R</given-names></name><name><surname>Walsh</surname> <given-names>CA</given-names></name><name><surname>Dimidschstein</surname> <given-names>J</given-names></name><name><surname>Rudy</surname> <given-names>B</given-names></name><name><surname>S Kean</surname> <given-names>L</given-names></name><name><surname>Berretta</surname> <given-names>S</given-names></name><name><surname>Fishell</surname> <given-names>G</given-names></name><name><surname>Feng</surname> <given-names>G</given-names></name><name><surname>McCarroll</surname> <given-names>SA</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Innovations present in the primate interneuron repertoire</article-title><source>Nature</source><volume>586</volume><fpage>262</fpage><lpage>269</lpage><pub-id pub-id-type="doi">10.1038/s41586-020-2781-z</pub-id><pub-id pub-id-type="pmid">32999462</pub-id></element-citation></ref><ref id="bib87"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krimer</surname> 
<given-names>LS</given-names></name><name><surname>Zaitsev</surname> <given-names>AV</given-names></name><name><surname>Czanner</surname> <given-names>G</given-names></name><name><surname>Kr&#246;ner</surname> <given-names>S</given-names></name><name><surname>Gonz&#225;lez-Burgos</surname> <given-names>G</given-names></name><name><surname>Povysheva</surname> <given-names>NV</given-names></name><name><surname>Iyengar</surname> <given-names>S</given-names></name><name><surname>Barrionuevo</surname> <given-names>G</given-names></name><name><surname>Lewis</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="2005">2005</year><article-title>Cluster analysis-based physiological classification and morphological properties of inhibitory neurons in layers 2-3 of monkey dorsolateral prefrontal cortex</article-title><source>Journal of Neurophysiology</source><volume>94</volume><fpage>3009</fpage><lpage>3022</lpage><pub-id pub-id-type="doi">10.1152/jn.00156.2005</pub-id><pub-id pub-id-type="pmid">15987765</pub-id></element-citation></ref><ref id="bib88"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kvitsiani</surname> <given-names>D</given-names></name><name><surname>Ranade</surname> <given-names>S</given-names></name><name><surname>Hangya</surname> <given-names>B</given-names></name><name><surname>Taniguchi</surname> <given-names>H</given-names></name><name><surname>Huang</surname> <given-names>JZ</given-names></name><name><surname>Kepecs</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Distinct behavioural and network correlates of two interneuron types in prefrontal cortex</article-title><source>Nature</source><volume>498</volume><fpage>363</fpage><lpage>366</lpage><pub-id pub-id-type="doi">10.1038/nature12176</pub-id><pub-id pub-id-type="pmid">23708967</pub-id></element-citation></ref><ref id="bib89"><element-citation 
publication-type="book"><person-group person-group-type="author"><name><surname>Lambiotte</surname> <given-names>R</given-names></name></person-group><year iso-8601-date="2007">2007</year><source>Finding Communities at Different Resolutions in Large Networks</source><publisher-name>Institute for Mathematical Sciences</publisher-name></element-citation></ref><ref id="bib90"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Lambiotte</surname> <given-names>R</given-names></name><name><surname>Delvenne</surname> <given-names>J-C</given-names></name><name><surname>Barahona</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2008">2008</year><article-title>Laplacian dynamics and multiscale modular structure in networks</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/0812.1770">https://arxiv.org/abs/0812.1770</ext-link></element-citation></ref><ref id="bib91"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>EK</given-names></name><name><surname>Balasubramanian</surname> <given-names>H</given-names></name><name><surname>Tsolias</surname> <given-names>A</given-names></name><name><surname>Anakwe</surname> <given-names>S</given-names></name><name><surname>Medalla</surname> <given-names>M</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name><name><surname>Chandrasekaran</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2021">2021</year><data-title>Wavemap Analysis of Extracellular Waveforms From Monkey Premotor Cortex During Decision-Making</data-title><source>Zenodo</source><version designator="1">1</version><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5281/zenodo.5123316">https://doi.org/10.5281/zenodo.5123316</ext-link></element-citation></ref><ref id="bib92"><element-citation 
publication-type="book"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>JA</given-names></name><name><surname>Verleysen</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2007">2007</year><source>Nonlinear Dimensionality Reduction</source><publisher-loc>New York</publisher-loc><publisher-name>Springer</publisher-name><pub-id pub-id-type="doi">10.1007/978-0-387-39351-3</pub-id></element-citation></ref><ref id="bib93"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lemon</surname> <given-names>RN</given-names></name><name><surname>Baker</surname> <given-names>SN</given-names></name><name><surname>Kraskov</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Classification of cortical neurons by spike shape and the identification of pyramidal neurons</article-title><source>Cerebral Cortex</source><volume>4</volume><elocation-id>bhab147</elocation-id><pub-id pub-id-type="doi">10.1093/cercor/bhab147</pub-id></element-citation></ref><ref id="bib94"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Linderman</surname> <given-names>GC</given-names></name><name><surname>Rachh</surname> <given-names>M</given-names></name><name><surname>Hoskins</surname> <given-names>JG</given-names></name><name><surname>Steinerberger</surname> <given-names>S</given-names></name><name><surname>Kluger</surname> <given-names>Y</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Fast interpolation-based t-SNE for improved visualization of single-cell RNA-seq data</article-title><source>Nature Methods</source><volume>16</volume><fpage>243</fpage><lpage>245</lpage><pub-id pub-id-type="doi">10.1038/s41592-018-0308-4</pub-id><pub-id pub-id-type="pmid">30742040</pub-id></element-citation></ref><ref id="bib95"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Lui</surname> <given-names>JH</given-names></name><name><surname>Nguyen</surname> <given-names>ND</given-names></name><name><surname>Grutzner</surname> <given-names>SM</given-names></name><name><surname>Darmanis</surname> <given-names>S</given-names></name><name><surname>Peixoto</surname> <given-names>D</given-names></name><name><surname>Wagner</surname> <given-names>MJ</given-names></name><name><surname>Allen</surname> <given-names>WE</given-names></name><name><surname>Kebschull</surname> <given-names>JM</given-names></name><name><surname>Richman</surname> <given-names>EB</given-names></name><name><surname>Ren</surname> <given-names>J</given-names></name><name><surname>Newsome</surname> <given-names>WT</given-names></name><name><surname>Quake</surname> <given-names>SR</given-names></name><name><surname>Luo</surname> <given-names>L</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Differential encoding in prefrontal cortex projection neuron classes across cognitive tasks</article-title><source>Cell</source><volume>184</volume><fpage>489</fpage><lpage>506</lpage><pub-id pub-id-type="doi">10.1016/j.cell.2020.11.046</pub-id><pub-id pub-id-type="pmid">33338423</pub-id></element-citation></ref><ref id="bib96"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Lundberg</surname> <given-names>SM</given-names></name><name><surname>Erion</surname> <given-names>GG</given-names></name><name><surname>Lee</surname> <given-names>S-I</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Consistent individualized feature attribution for tree ensembles</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1802.03888">https://arxiv.org/abs/1802.03888</ext-link></element-citation></ref><ref id="bib97"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Lundberg</surname> <given-names>SM</given-names></name><name><surname>Erion</surname> <given-names>G</given-names></name><name><surname>Chen</surname> <given-names>H</given-names></name><name><surname>DeGrave</surname> <given-names>A</given-names></name><name><surname>Prutkin</surname> <given-names>JM</given-names></name><name><surname>Nair</surname> <given-names>B</given-names></name><name><surname>Katz</surname> <given-names>R</given-names></name><name><surname>Himmelfarb</surname> <given-names>J</given-names></name><name><surname>Bansal</surname> <given-names>N</given-names></name><name><surname>Lee</surname> <given-names>SI</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>From local explanations to global understanding with explainable AI for trees</article-title><source>Nature Machine Intelligence</source><volume>2</volume><fpage>56</fpage><lpage>67</lpage><pub-id pub-id-type="doi">10.1038/s42256-019-0138-9</pub-id><pub-id pub-id-type="pmid">32607472</pub-id></element-citation></ref><ref id="bib98"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Lundberg</surname> <given-names>S</given-names></name><name><surname>Lee</surname> <given-names>S-I</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>A unified approach to interpreting model predictions</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1705.07874">https://arxiv.org/abs/1705.07874</ext-link></element-citation></ref><ref id="bib99"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maaten</surname> <given-names>L</given-names></name><name><surname>Hinton</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2008">2008</year><article-title>Visualizing data using 
t-SNE</article-title><source>Journal of Machine Learning Research</source><volume>9</volume><fpage>2579</fpage><lpage>2605</lpage></element-citation></ref><ref id="bib100"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mahallati</surname> <given-names>S</given-names></name><name><surname>Bezdek</surname> <given-names>JC</given-names></name><name><surname>Popovic</surname> <given-names>MR</given-names></name><name><surname>Valiante</surname> <given-names>TA</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Cluster tendency assessment in neuronal spike data</article-title><source>PLOS ONE</source><volume>14</volume><elocation-id>e0224547</elocation-id><pub-id pub-id-type="doi">10.1371/journal.pone.0224547</pub-id><pub-id pub-id-type="pmid">31714913</pub-id></element-citation></ref><ref id="bib101"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Maheswaranathan</surname> <given-names>N</given-names></name><name><surname>Williams</surname> <given-names>AH</given-names></name><name><surname>Golub</surname> <given-names>MD</given-names></name><name><surname>Ganguli</surname> <given-names>S</given-names></name><name><surname>Sussillo</surname> <given-names>D</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Universality and individuality in neural dynamics across large populations of recurrent networks</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1907.08549">https://arxiv.org/abs/1907.08549</ext-link></element-citation></ref><ref id="bib102"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mante</surname> <given-names>V</given-names></name><name><surname>Sussillo</surname> <given-names>D</given-names></name><name><surname>Shenoy</surname> 
<given-names>KV</given-names></name><name><surname>Newsome</surname> <given-names>WT</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Context-dependent computation by recurrent dynamics in prefrontal cortex</article-title><source>Nature</source><volume>503</volume><fpage>78</fpage><lpage>84</lpage><pub-id pub-id-type="doi">10.1038/nature12742</pub-id><pub-id pub-id-type="pmid">24201281</pub-id></element-citation></ref><ref id="bib103"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Markanday</surname> <given-names>A</given-names></name><name><surname>Bellet</surname> <given-names>J</given-names></name><name><surname>Bellet</surname> <given-names>ME</given-names></name><name><surname>Inoue</surname> <given-names>J</given-names></name><name><surname>Hafed</surname> <given-names>ZM</given-names></name><name><surname>Thier</surname> <given-names>P</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Using deep neural networks to detect complex spikes of cerebellar Purkinje cells</article-title><source>Journal of Neurophysiology</source><volume>123</volume><fpage>2217</fpage><lpage>2234</lpage><pub-id pub-id-type="doi">10.1152/jn.00754.2019</pub-id><pub-id pub-id-type="pmid">32374226</pub-id></element-citation></ref><ref id="bib104"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Matelli</surname> <given-names>M</given-names></name><name><surname>Luppino</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="1996">1996</year><article-title>Thalamic input to mesial and superior area 6 in the macaque monkey</article-title><source>The Journal of Comparative Neurology</source><volume>372</volume><fpage>59</fpage><lpage>87</lpage><pub-id pub-id-type="doi">10.1002/(SICI)1096-9861(19960812)372:1&lt;59::AID-CNE6&gt;3.0.CO;2-L</pub-id><pub-id 
pub-id-type="pmid">8841922</pub-id></element-citation></ref><ref id="bib105"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McCormick</surname> <given-names>DA</given-names></name><name><surname>Connors</surname> <given-names>BW</given-names></name><name><surname>Lighthall</surname> <given-names>JW</given-names></name><name><surname>Prince</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="1985">1985</year><article-title>Comparative electrophysiology of pyramidal and sparsely spiny stellate neurons of the neocortex</article-title><source>Journal of Neurophysiology</source><volume>54</volume><fpage>782</fpage><lpage>806</lpage><pub-id pub-id-type="doi">10.1152/jn.1985.54.4.782</pub-id><pub-id pub-id-type="pmid">2999347</pub-id></element-citation></ref><ref id="bib106"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>McInnes</surname> <given-names>L</given-names></name><name><surname>Healy</surname> <given-names>J</given-names></name><name><surname>Melville</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>UMAP: uniform manifold approximation and projection for dimension reduction</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1802.03426">https://arxiv.org/abs/1802.03426</ext-link></element-citation></ref><ref id="bib107"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>McInnes</surname> <given-names>L</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Topological techniques for unsupervised learning</article-title><conf-name>PyData 2019</conf-name></element-citation></ref><ref id="bib108"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Medalla</surname> 
<given-names>M</given-names></name><name><surname>Luebke</surname> <given-names>JI</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Diversity of glutamatergic synaptic strength in lateral prefrontal versus primary visual cortices in the rhesus monkey</article-title><source>Journal of Neuroscience</source><volume>35</volume><fpage>112</fpage><lpage>127</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.3426-14.2015</pub-id><pub-id pub-id-type="pmid">25568107</pub-id></element-citation></ref><ref id="bib109"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mehta</surname> <given-names>P</given-names></name><name><surname>Kreeger</surname> <given-names>L</given-names></name><name><surname>Wylie</surname> <given-names>DC</given-names></name><name><surname>Pattadkal</surname> <given-names>JJ</given-names></name><name><surname>Lusignan</surname> <given-names>T</given-names></name><name><surname>Davis</surname> <given-names>MJ</given-names></name><name><surname>Turi</surname> <given-names>GF</given-names></name><name><surname>Li</surname> <given-names>WK</given-names></name><name><surname>Whitmire</surname> <given-names>MP</given-names></name><name><surname>Chen</surname> <given-names>Y</given-names></name><name><surname>Kajs</surname> <given-names>BL</given-names></name><name><surname>Seidemann</surname> <given-names>E</given-names></name><name><surname>Priebe</surname> <given-names>NJ</given-names></name><name><surname>Losonczy</surname> <given-names>A</given-names></name><name><surname>Zemelman</surname> <given-names>BV</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Functional access to neuron subclasses in rodent and primate forebrain</article-title><source>Cell Reports</source><volume>26</volume><fpage>2818</fpage><lpage>2832</lpage><pub-id pub-id-type="doi">10.1016/j.celrep.2019.02.011</pub-id><pub-id 
pub-id-type="pmid">30840900</pub-id></element-citation></ref><ref id="bib110"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Meister</surname> <given-names>ML</given-names></name><name><surname>Hennig</surname> <given-names>JA</given-names></name><name><surname>Huk</surname> <given-names>AC</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Signal multiplexing and single-neuron computations in lateral intraparietal area during decision-making</article-title><source>Journal of Neuroscience</source><volume>33</volume><fpage>2254</fpage><lpage>2267</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.2984-12.2013</pub-id><pub-id pub-id-type="pmid">23392657</pub-id></element-citation></ref><ref id="bib111"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Merchant</surname> <given-names>H</given-names></name><name><surname>Naselaris</surname> <given-names>T</given-names></name><name><surname>Georgopoulos</surname> <given-names>AP</given-names></name></person-group><year iso-8601-date="2008">2008</year><article-title>Dynamic sculpting of directional tuning in the primate motor cortex during three-dimensional reaching</article-title><source>Journal of Neuroscience</source><volume>28</volume><fpage>9164</fpage><lpage>9172</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.1898-08.2008</pub-id><pub-id pub-id-type="pmid">18784297</pub-id></element-citation></ref><ref id="bib112"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Merchant</surname> <given-names>H</given-names></name><name><surname>de Lafuente</surname> <given-names>V</given-names></name><name><surname>Pe&#241;a-Ortega</surname> <given-names>F</given-names></name><name><surname>Larriva-Sahd</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Functional impact of 
interneuronal inhibition in the cerebral cortex of behaving animals</article-title><source>Progress in Neurobiology</source><volume>99</volume><fpage>163</fpage><lpage>178</lpage><pub-id pub-id-type="doi">10.1016/j.pneurobio.2012.08.005</pub-id></element-citation></ref><ref id="bib113"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mitchell</surname> <given-names>JF</given-names></name><name><surname>Sundberg</surname> <given-names>KA</given-names></name><name><surname>Reynolds</surname> <given-names>JH</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>Differential attention-dependent response modulation across cell classes in macaque visual area V4</article-title><source>Neuron</source><volume>55</volume><fpage>131</fpage><lpage>141</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2007.06.018</pub-id><pub-id pub-id-type="pmid">17610822</pub-id></element-citation></ref><ref id="bib114"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Molnar</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2020">2020</year><source>Interpretable Machine Learning: A Guide for Making Black Box Models Explainable</source><publisher-name>Interpretable Machine</publisher-name></element-citation></ref><ref id="bib115"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Morecraft</surname> <given-names>RJ</given-names></name><name><surname>Cipolloni</surname> <given-names>PB</given-names></name><name><surname>Stilwell-Morecraft</surname> <given-names>KS</given-names></name><name><surname>Gedney</surname> <given-names>MT</given-names></name><name><surname>Pandya</surname> <given-names>DN</given-names></name></person-group><year iso-8601-date="2004">2004</year><article-title>Cytoarchitecture and cortical connections of the posterior cingulate and adjacent somatosensory 
fields in the rhesus monkey</article-title><source>The Journal of Comparative Neurology</source><volume>469</volume><fpage>37</fpage><lpage>69</lpage><pub-id pub-id-type="doi">10.1002/cne.10980</pub-id><pub-id pub-id-type="pmid">14689472</pub-id></element-citation></ref><ref id="bib116"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Morecraft</surname> <given-names>RJ</given-names></name><name><surname>Ge</surname> <given-names>J</given-names></name><name><surname>Stilwell&#8208;Morecraft</surname> <given-names>KS</given-names></name><name><surname>Rotella</surname> <given-names>DL</given-names></name><name><surname>Pizzimenti</surname> <given-names>MA</given-names></name><name><surname>Darling</surname> <given-names>WG</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Terminal organization of the corticospinal projection from the lateral premotor cortex to the cervical enlargement (C5&#8211;T1) in rhesus monkey</article-title><source>Journal of Comparative Neurology</source><volume>527</volume><fpage>2761</fpage><lpage>2789</lpage><pub-id pub-id-type="doi">10.1002/cne.24706</pub-id></element-citation></ref><ref id="bib117"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Moscovich</surname> <given-names>A</given-names></name><name><surname>Rosset</surname> <given-names>S</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>On the cross-validation bias due to unsupervised pre-processing</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1901.08974">https://arxiv.org/abs/1901.08974</ext-link></element-citation></ref><ref id="bib118"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mosher</surname> <given-names>CP</given-names></name><name><surname>Wei</surname> 
<given-names>Y</given-names></name><name><surname>Kami&#324;ski</surname> <given-names>J</given-names></name><name><surname>Nandi</surname> <given-names>A</given-names></name><name><surname>Mamelak</surname> <given-names>AN</given-names></name><name><surname>Anastassiou</surname> <given-names>CA</given-names></name><name><surname>Rutishauser</surname> <given-names>U</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Cellular classes in the human brain revealed in vivo by heartbeat-related modulation of the extracellular action potential waveform</article-title><source>Cell Reports</source><volume>30</volume><fpage>3536</fpage><lpage>3551</lpage><pub-id pub-id-type="doi">10.1016/j.celrep.2020.02.027</pub-id><pub-id pub-id-type="pmid">32160555</pub-id></element-citation></ref><ref id="bib119"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mountcastle</surname> <given-names>VB</given-names></name><name><surname>Talbot</surname> <given-names>WH</given-names></name><name><surname>Sakata</surname> <given-names>H</given-names></name><name><surname>Hyv&#228;rinen</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="1969">1969</year><article-title>Cortical neuronal mechanisms in flutter-vibration studied in unanesthetized monkeys. Neuronal periodicity and frequency discrimination</article-title><source>Journal of Neurophysiology</source><volume>32</volume><fpage>452</fpage><lpage>484</lpage><pub-id pub-id-type="doi">10.1152/jn.1969.32.3.452</pub-id><pub-id pub-id-type="pmid">4977839</pub-id></element-citation></ref><ref id="bib120"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mruczek</surname> <given-names>RE</given-names></name><name><surname>Sheinberg</surname> <given-names>DL</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Stimulus selectivity and response 
latency in putative inhibitory and excitatory neurons of the primate inferior temporal cortex</article-title><source>Journal of Neurophysiology</source><volume>108</volume><fpage>2725</fpage><lpage>2736</lpage><pub-id pub-id-type="doi">10.1152/jn.00618.2012</pub-id><pub-id pub-id-type="pmid">22933717</pub-id></element-citation></ref><ref id="bib121"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nandy</surname> <given-names>AS</given-names></name><name><surname>Nassi</surname> <given-names>JJ</given-names></name><name><surname>Reynolds</surname> <given-names>JH</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Laminar organization of attentional modulation in macaque visual area V4</article-title><source>Neuron</source><volume>93</volume><fpage>235</fpage><lpage>246</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2016.11.029</pub-id><pub-id pub-id-type="pmid">27989456</pub-id></element-citation></ref><ref id="bib122"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Newman</surname> <given-names>MEJ</given-names></name><name><surname>Girvan</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2004">2004</year><article-title>Finding and evaluating community structure in networks</article-title><source>Physical Review E</source><volume>69</volume><elocation-id>026113</elocation-id><pub-id pub-id-type="doi">10.1103/PhysRevE.69.026113</pub-id></element-citation></ref><ref id="bib123"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Noichl</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2019">2019</year><data-title>Examples for UMAP reduction using 3D models of prehistoric animals</data-title><source>GitHub</source><ext-link ext-link-type="uri" 
xlink:href="https://github.com/MNoichl/UMAP-examples-mammoth-">https://github.com/MNoichl/UMAP-examples-mammoth-</ext-link></element-citation></ref><ref id="bib124"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Noichl</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Modeling the structure of recent philosophy</article-title><source>Synthese</source><volume>198</volume><fpage>5089</fpage><lpage>5100</lpage><pub-id pub-id-type="doi">10.1007/s11229-019-02390-8</pub-id></element-citation></ref><ref id="bib125"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Nolet</surname> <given-names>CJ</given-names></name><name><surname>Lafargue</surname> <given-names>V</given-names></name><name><surname>Raff</surname> <given-names>E</given-names></name><name><surname>Nanditale</surname> <given-names>T</given-names></name><name><surname>Oates</surname> <given-names>T</given-names></name><name><surname>Zedlewski</surname> <given-names>J</given-names></name><name><surname>Patterson</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Bringing UMAP closer to the speed of light with GPU acceleration</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2008.00325">https://arxiv.org/abs/2008.00325</ext-link></element-citation></ref><ref id="bib126"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nowak</surname> <given-names>LG</given-names></name><name><surname>Azouz</surname> <given-names>R</given-names></name><name><surname>Sanchez-Vives</surname> <given-names>MV</given-names></name><name><surname>Gray</surname> <given-names>CM</given-names></name><name><surname>McCormick</surname> <given-names>DA</given-names></name></person-group><year 
iso-8601-date="2003">2003</year><article-title>Electrophysiological classes of cat primary visual cortical neurons in vivo as revealed by quantitative analyses</article-title><source>Journal of Neurophysiology</source><volume>89</volume><fpage>1541</fpage><lpage>1566</lpage><pub-id pub-id-type="doi">10.1152/jn.00580.2002</pub-id><pub-id pub-id-type="pmid">12626627</pub-id></element-citation></ref><ref id="bib127"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Onorato</surname> <given-names>I</given-names></name><name><surname>Neuenschwander</surname> <given-names>S</given-names></name><name><surname>Hoy</surname> <given-names>J</given-names></name><name><surname>Lima</surname> <given-names>B</given-names></name><name><surname>Rocha</surname> <given-names>KS</given-names></name><name><surname>Broggini</surname> <given-names>AC</given-names></name><name><surname>Uran</surname> <given-names>C</given-names></name><name><surname>Spyropoulos</surname> <given-names>G</given-names></name><name><surname>Klon-Lipok</surname> <given-names>J</given-names></name><name><surname>Womelsdorf</surname> <given-names>T</given-names></name><name><surname>Fries</surname> <given-names>P</given-names></name><name><surname>Niell</surname> <given-names>C</given-names></name><name><surname>Singer</surname> <given-names>W</given-names></name><name><surname>Vinck</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>A distinct class of bursting neurons with strong gamma synchronization and stimulus selectivity in monkey V1</article-title><source>Neuron</source><volume>105</volume><fpage>180</fpage><lpage>197</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2019.09.039</pub-id><pub-id pub-id-type="pmid">31732258</pub-id></element-citation></ref><ref id="bib128"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pandarinath</surname> 
<given-names>C</given-names></name><name><surname>O'Shea</surname> <given-names>DJ</given-names></name><name><surname>Collins</surname> <given-names>J</given-names></name><name><surname>Jozefowicz</surname> <given-names>R</given-names></name><name><surname>Stavisky</surname> <given-names>SD</given-names></name><name><surname>Kao</surname> <given-names>JC</given-names></name><name><surname>Trautmann</surname> <given-names>EM</given-names></name><name><surname>Kaufman</surname> <given-names>MT</given-names></name><name><surname>Ryu</surname> <given-names>SI</given-names></name><name><surname>Hochberg</surname> <given-names>LR</given-names></name><name><surname>Henderson</surname> <given-names>JM</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name><name><surname>Abbott</surname> <given-names>LF</given-names></name><name><surname>Sussillo</surname> <given-names>D</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Inferring single-trial neural population dynamics using sequential auto-encoders</article-title><source>Nature Methods</source><volume>15</volume><fpage>805</fpage><lpage>815</lpage><pub-id pub-id-type="doi">10.1038/s41592-018-0109-9</pub-id><pub-id pub-id-type="pmid">30224673</pub-id></element-citation></ref><ref id="bib129"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Paulk</surname> <given-names>AC</given-names></name><name><surname>Kfir</surname> <given-names>Y</given-names></name><name><surname>Khanna</surname> <given-names>A</given-names></name><name><surname>Mustroph</surname> <given-names>M</given-names></name><name><surname>Trautmann</surname> <given-names>EM</given-names></name><name><surname>Soper</surname> <given-names>DJ</given-names></name><name><surname>Stavisky</surname> <given-names>SD</given-names></name><name><surname>Welkenhuysen</surname> <given-names>M</given-names></name><name><surname>Dutta</surname> 
<given-names>B</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name><name><surname>Hochberg</surname> <given-names>LR</given-names></name><name><surname>Richardson</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Large-scale neural recordings with single-cell resolution in human cortex using high-density Neuropixels probes</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2021.06.20.449152</pub-id></element-citation></ref><ref id="bib130"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pinto</surname> <given-names>L</given-names></name><name><surname>Dan</surname> <given-names>Y</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Cell-type-specific activity in prefrontal cortex during goal-directed behavior</article-title><source>Neuron</source><volume>87</volume><fpage>437</fpage><lpage>450</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2015.06.021</pub-id><pub-id pub-id-type="pmid">26143660</pub-id></element-citation></ref><ref id="bib131"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Porter</surname> <given-names>MA</given-names></name><name><surname>Onnela</surname> <given-names>J-P</given-names></name><name><surname>Mucha</surname> <given-names>PJ</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Communities in networks</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/0902.3788">https://arxiv.org/abs/0902.3788</ext-link></element-citation></ref><ref id="bib132"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Poulin</surname> <given-names>V</given-names></name><name><surname>Th&#233;berge</surname> <given-names>F</given-names></name></person-group><year 
iso-8601-date="2018">2018</year><article-title>Ensemble clustering for graphs</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1809.05578">https://arxiv.org/abs/1809.05578</ext-link></element-citation></ref><ref id="bib133"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Poulin</surname> <given-names>V</given-names></name><name><surname>Th&#233;berge</surname> <given-names>F</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Ensemble clustering for graphs: comparisons and applications</article-title><source>Applied Network Science</source><volume>4</volume><elocation-id>51</elocation-id><pub-id pub-id-type="doi">10.1007/s41109-019-0162-z</pub-id></element-citation></ref><ref id="bib134"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Povysheva</surname> <given-names>NV</given-names></name><name><surname>Zaitsev</surname> <given-names>AV</given-names></name><name><surname>Gonzalez-Burgos</surname> <given-names>G</given-names></name><name><surname>Lewis</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Electrophysiological heterogeneity of fast-spiking interneurons: chandelier versus basket cells</article-title><source>PLOS ONE</source><volume>8</volume><elocation-id>e70553</elocation-id><pub-id pub-id-type="doi">10.1371/journal.pone.0070553</pub-id><pub-id pub-id-type="pmid">23950961</pub-id></element-citation></ref><ref id="bib135"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Quirk</surname> <given-names>MC</given-names></name><name><surname>Sosulski</surname> <given-names>DL</given-names></name><name><surname>Feierstein</surname> <given-names>CE</given-names></name><name><surname>Uchida</surname> 
<given-names>N</given-names></name><name><surname>Mainen</surname> <given-names>ZF</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>A defined network of fast-spiking interneurons in orbitofrontal cortex: responses to behavioral contingencies and ketamine administration</article-title><source>Frontiers in Systems Neuroscience</source><volume>3</volume><elocation-id>13</elocation-id><pub-id pub-id-type="doi">10.3389/neuro.06.013.2009</pub-id><pub-id pub-id-type="pmid">20057934</pub-id></element-citation></ref><ref id="bib136"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reimann</surname> <given-names>MW</given-names></name><name><surname>Anastassiou</surname> <given-names>CA</given-names></name><name><surname>Perin</surname> <given-names>R</given-names></name><name><surname>Hill</surname> <given-names>SL</given-names></name><name><surname>Markram</surname> <given-names>H</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>A biophysically detailed model of neocortical local field potentials predicts the critical role of active membrane currents</article-title><source>Neuron</source><volume>79</volume><fpage>375</fpage><lpage>390</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2013.05.023</pub-id><pub-id pub-id-type="pmid">23889937</pub-id></element-citation></ref><ref id="bib137"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Remington</surname> <given-names>ED</given-names></name><name><surname>Narain</surname> <given-names>D</given-names></name><name><surname>Hosseini</surname> <given-names>EA</given-names></name><name><surname>Jazayeri</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Flexible sensorimotor computations through rapid reconfiguration of 
cortical dynamics</article-title><source>Neuron</source><volume>98</volume><fpage>1005</fpage><lpage>1019</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2018.05.020</pub-id><pub-id pub-id-type="pmid">29879384</pub-id></element-citation></ref><ref id="bib138"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rizzolatti</surname> <given-names>G</given-names></name><name><surname>Luppino</surname> <given-names>G</given-names></name><name><surname>Matelli</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="1998">1998</year><article-title>The organization of the cortical motor system: new concepts</article-title><source>Electroencephalography and Clinical Neurophysiology</source><volume>106</volume><fpage>283</fpage><lpage>296</lpage><pub-id pub-id-type="doi">10.1016/s0013-4694(98)00022-4</pub-id><pub-id pub-id-type="pmid">9741757</pub-id></element-citation></ref><ref id="bib139"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Robbins</surname> <given-names>AA</given-names></name><name><surname>Fox</surname> <given-names>SE</given-names></name><name><surname>Holmes</surname> <given-names>GL</given-names></name><name><surname>Scott</surname> <given-names>RC</given-names></name><name><surname>Barry</surname> <given-names>JM</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Short duration waveforms recorded extracellularly from freely moving rats are representative of axonal activity</article-title><source>Frontiers in Neural Circuits</source><volume>7</volume><elocation-id>181</elocation-id><pub-id pub-id-type="doi">10.3389/fncir.2013.00181</pub-id><pub-id pub-id-type="pmid">24348338</pub-id></element-citation></ref><ref id="bib140"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Roitman</surname> 
<given-names>JD</given-names></name><name><surname>Shadlen</surname> <given-names>MN</given-names></name></person-group><year iso-8601-date="2002">2002</year><article-title>Response of neurons in the lateral intraparietal area during a combined visual discrimination reaction time task</article-title><source>The Journal of Neuroscience</source><volume>22</volume><fpage>9475</fpage><lpage>9489</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.22-21-09475.2002</pub-id><pub-id pub-id-type="pmid">12417672</pub-id></element-citation></ref><ref id="bib141"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rosene</surname> <given-names>DL</given-names></name><name><surname>Roy</surname> <given-names>NJ</given-names></name><name><surname>Davis</surname> <given-names>BJ</given-names></name></person-group><year iso-8601-date="1986">1986</year><article-title>A cryoprotection method that facilitates cutting frozen sections of whole monkey brains for histological and histochemical processing without freezing artifact</article-title><source>Journal of Histochemistry &amp; Cytochemistry</source><volume>34</volume><fpage>1301</fpage><lpage>1315</lpage><pub-id pub-id-type="doi">10.1177/34.10.3745909</pub-id><pub-id pub-id-type="pmid">3745909</pub-id></element-citation></ref><ref id="bib142"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Roux</surname> <given-names>L</given-names></name><name><surname>Stark</surname> <given-names>E</given-names></name><name><surname>Sjulson</surname> <given-names>L</given-names></name><name><surname>Buzs&#225;ki</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>In vivo optogenetic identification and manipulation of GABAergic interneuron subtypes</article-title><source>Current Opinion in Neurobiology</source><volume>26</volume><fpage>88</fpage><lpage>95</lpage><pub-id 
pub-id-type="doi">10.1016/j.conb.2013.12.013</pub-id><pub-id pub-id-type="pmid">24440414</pub-id></element-citation></ref><ref id="bib143"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Sainburg</surname> <given-names>T</given-names></name><name><surname>McInnes</surname> <given-names>L</given-names></name><name><surname>Gentner</surname> <given-names>TQ</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Parametric UMAP: learning embeddings with deep neural networks for representation and semi-supervised learning</article-title><source>arXiv</source><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2009.12981">https://arxiv.org/abs/2009.12981</ext-link></element-citation></ref><ref id="bib144"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Saleh</surname> <given-names>MS</given-names></name><name><surname>Ritchie</surname> <given-names>SM</given-names></name><name><surname>Nicholas</surname> <given-names>MA</given-names></name><name><surname>Bezbaruah</surname> <given-names>R</given-names></name><name><surname>Reddy</surname> <given-names>JW</given-names></name><name><surname>Chamanzar</surname> <given-names>M</given-names></name><name><surname>Yttri</surname> <given-names>EA</given-names></name><name><surname>Panat</surname> <given-names>RP</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>CMU array: a 3D nano-printed, customizable ultra-high-density microelectrode array platform</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/742346</pub-id></element-citation></ref><ref id="bib145"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schindelin</surname> <given-names>J</given-names></name><name><surname>Arganda-Carreras</surname> <given-names>I</given-names></name><name><surname>Frise</surname> 
<given-names>E</given-names></name><name><surname>Kaynig</surname> <given-names>V</given-names></name><name><surname>Longair</surname> <given-names>M</given-names></name><name><surname>Pietzsch</surname> <given-names>T</given-names></name><name><surname>Preibisch</surname> <given-names>S</given-names></name><name><surname>Rueden</surname> <given-names>C</given-names></name><name><surname>Saalfeld</surname> <given-names>S</given-names></name><name><surname>Schmid</surname> <given-names>B</given-names></name><name><surname>Tinevez</surname> <given-names>JY</given-names></name><name><surname>White</surname> <given-names>DJ</given-names></name><name><surname>Hartenstein</surname> <given-names>V</given-names></name><name><surname>Eliceiri</surname> <given-names>K</given-names></name><name><surname>Tomancak</surname> <given-names>P</given-names></name><name><surname>Cardona</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2012">2012</year><article-title>Fiji: an open-source platform for biological-image analysis</article-title><source>Nature Methods</source><volume>9</volume><fpage>676</fpage><lpage>682</lpage><pub-id pub-id-type="doi">10.1038/nmeth.2019</pub-id><pub-id pub-id-type="pmid">22743772</pub-id></element-citation></ref><ref id="bib146"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schmitz</surname> <given-names>C</given-names></name><name><surname>Eastwood</surname> <given-names>BS</given-names></name><name><surname>Tappan</surname> <given-names>SJ</given-names></name><name><surname>Glaser</surname> <given-names>JR</given-names></name><name><surname>Peterson</surname> <given-names>DA</given-names></name><name><surname>Hof</surname> <given-names>PR</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Current automated 3D cell detection methods are not a suitable replacement for manual stereologic cell 
counting</article-title><source>Frontiers in Neuroanatomy</source><volume>8</volume><fpage>1</fpage><lpage>14</lpage><pub-id pub-id-type="doi">10.3389/fnana.2014.00027</pub-id><pub-id pub-id-type="pmid">24847213</pub-id></element-citation></ref><ref id="bib147"><element-citation publication-type="journal"><person-group person-group-type="author"><collab>SciPy 1.0 Contributors</collab><name><surname>Virtanen</surname> <given-names>P</given-names></name><name><surname>Gommers</surname> <given-names>R</given-names></name><name><surname>Oliphant</surname> <given-names>TE</given-names></name><name><surname>Haberland</surname> <given-names>M</given-names></name><name><surname>Reddy</surname> <given-names>T</given-names></name><name><surname>Cournapeau</surname> <given-names>D</given-names></name><name><surname>Burovski</surname> <given-names>E</given-names></name><name><surname>Peterson</surname> <given-names>P</given-names></name><name><surname>Weckesser</surname> <given-names>W</given-names></name><name><surname>Bright</surname> <given-names>J</given-names></name><name><surname>van der Walt</surname> <given-names>SJ</given-names></name><name><surname>Brett</surname> <given-names>M</given-names></name><name><surname>Wilson</surname> <given-names>J</given-names></name><name><surname>Millman</surname> <given-names>KJ</given-names></name><name><surname>Mayorov</surname> <given-names>N</given-names></name><name><surname>Nelson</surname> <given-names>ARJ</given-names></name><name><surname>Jones</surname> <given-names>E</given-names></name><name><surname>Kern</surname> <given-names>R</given-names></name><name><surname>Larson</surname> <given-names>E</given-names></name><name><surname>Carey</surname> <given-names>CJ</given-names></name><name><surname>Polat</surname> <given-names>&#304;</given-names></name><name><surname>Feng</surname> <given-names>Y</given-names></name><name><surname>Moore</surname> <given-names>EW</given-names></name><name><surname>VanderPlas</surname> 
<given-names>J</given-names></name><name><surname>Laxalde</surname> <given-names>D</given-names></name><name><surname>Perktold</surname> <given-names>J</given-names></name><name><surname>Cimrman</surname> <given-names>R</given-names></name><name><surname>Henriksen</surname> <given-names>I</given-names></name><name><surname>Quintero</surname> <given-names>EA</given-names></name><name><surname>Harris</surname> <given-names>CR</given-names></name><name><surname>Archibald</surname> <given-names>AM</given-names></name><name><surname>Ribeiro</surname> <given-names>AH</given-names></name><name><surname>Pedregosa</surname> <given-names>F</given-names></name><name><surname>van Mulbregt</surname> <given-names>P</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>SciPy 1.0: fundamental algorithms for scientific computing in Python</article-title><source>Nature Methods</source><volume>17</volume><fpage>261</fpage><lpage>272</lpage><pub-id pub-id-type="doi">10.1038/s41592-019-0686-2</pub-id><pub-id pub-id-type="pmid">32015543</pub-id></element-citation></ref><ref id="bib148"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Seabold</surname> <given-names>S</given-names></name><name><surname>Perktold</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2010">2010</year><article-title>Statsmodels: econometric and statistical modeling with Python</article-title><conf-name>9th Python in Science Conference</conf-name><pub-id pub-id-type="doi">10.25080/Majora-92bf1922-011</pub-id></element-citation></ref><ref id="bib149"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Sedaghat-Nejad</surname> <given-names>E</given-names></name><name><surname>Fakharian</surname> <given-names>MA</given-names></name><name><surname>Pi</surname> <given-names>J</given-names></name><name><surname>Hage</surname> 
<given-names>P</given-names></name><name><surname>Kojima</surname> <given-names>Y</given-names></name><name><surname>Soetedjo</surname> <given-names>R</given-names></name><name><surname>Ohmae</surname> <given-names>S</given-names></name><name><surname>Medina</surname> <given-names>JF</given-names></name><name><surname>Shadmehr</surname> <given-names>R</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>P-sort: an open-source software for cerebellar neurophysiology</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2021.03.16.435644</pub-id></element-citation></ref><ref id="bib150"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Shapley</surname> <given-names>LS</given-names></name></person-group><year iso-8601-date="1988">1988</year><source>A Value for N-Person Games</source><publisher-name>Cambridge University Press</publisher-name><pub-id pub-id-type="doi">10.7249/P0295</pub-id></element-citation></ref><ref id="bib151"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shenoy</surname> <given-names>KV</given-names></name><name><surname>Sahani</surname> <given-names>M</given-names></name><name><surname>Churchland</surname> <given-names>MM</given-names></name></person-group><year iso-8601-date="2013">2013</year><article-title>Cortical control of arm movements: a dynamical systems perspective</article-title><source>Annual Review of Neuroscience</source><volume>36</volume><fpage>337</fpage><lpage>359</lpage><pub-id pub-id-type="doi">10.1146/annurev-neuro-062111-150509</pub-id><pub-id pub-id-type="pmid">23725001</pub-id></element-citation></ref><ref id="bib152"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Simons</surname> <given-names>DJ</given-names></name></person-group><year iso-8601-date="1978">1978</year><article-title>Response properties of vibrissa 
units in rat SI somatosensory neocortex</article-title><source>Journal of Neurophysiology</source><volume>41</volume><fpage>798</fpage><lpage>820</lpage><pub-id pub-id-type="doi">10.1152/jn.1978.41.3.798</pub-id><pub-id pub-id-type="pmid">660231</pub-id></element-citation></ref><ref id="bib153"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Snyder</surname> <given-names>AC</given-names></name><name><surname>Morais</surname> <given-names>MJ</given-names></name><name><surname>Smith</surname> <given-names>MA</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Dynamics of excitatory and inhibitory networks are differentially altered by selective attention</article-title><source>Journal of Neurophysiology</source><volume>116</volume><fpage>1807</fpage><lpage>1820</lpage><pub-id pub-id-type="doi">10.1152/jn.00343.2016</pub-id><pub-id pub-id-type="pmid">27466133</pub-id></element-citation></ref><ref id="bib154"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Soares</surname> <given-names>D</given-names></name><name><surname>Goldrick</surname> <given-names>I</given-names></name><name><surname>Lemon</surname> <given-names>RN</given-names></name><name><surname>Kraskov</surname> <given-names>A</given-names></name><name><surname>Greensmith</surname> <given-names>L</given-names></name><name><surname>Kalmar</surname> <given-names>B</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Expression of Kv3.1b potassium channel is widespread in macaque motor cortex pyramidal cells: A histological comparison between rat and macaque</article-title><source>Journal of Comparative Neurology</source><volume>525</volume><fpage>2164</fpage><lpage>2174</lpage><pub-id pub-id-type="doi">10.1002/cne.24192</pub-id></element-citation></ref><ref id="bib155"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Song</surname> <given-names>JH</given-names></name><name><surname>McPeek</surname> <given-names>RM</given-names></name></person-group><year iso-8601-date="2010">2010</year><article-title>Roles of narrow- and broad-spiking dorsal premotor area neurons in reach target selection and movement production</article-title><source>Journal of Neurophysiology</source><volume>103</volume><fpage>2124</fpage><lpage>2138</lpage><pub-id pub-id-type="doi">10.1152/jn.00238.2009</pub-id><pub-id pub-id-type="pmid">20164405</pub-id></element-citation></ref><ref id="bib156"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Spivak</surname> <given-names>DI</given-names></name></person-group><year iso-8601-date="2009">2009</year><source>Metric Realization of Fuzzy Simplicial Sets</source><publisher-name>Metric space</publisher-name></element-citation></ref><ref id="bib157"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steriade</surname> <given-names>M</given-names></name><name><surname>Timofeev</surname> <given-names>I</given-names></name><name><surname>D&#252;rm&#252;ller</surname> <given-names>N</given-names></name><name><surname>Grenier</surname> <given-names>F</given-names></name></person-group><year iso-8601-date="1998">1998</year><article-title>Dynamic properties of corticothalamic neurons and local cortical interneurons generating fast rhythmic (30-40 hz) spike bursts</article-title><source>Journal of Neurophysiology</source><volume>79</volume><fpage>483</fpage><lpage>490</lpage><pub-id pub-id-type="doi">10.1152/jn.1998.79.1.483</pub-id><pub-id pub-id-type="pmid">9425218</pub-id></element-citation></ref><ref id="bib158"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steriade</surname> <given-names>M</given-names></name></person-group><year 
iso-8601-date="2004">2004</year><article-title>Neocortical cell classes are flexible entities</article-title><source>Nature Reviews. Neuroscience</source><volume>5</volume><fpage>121</fpage><lpage>134</lpage><pub-id pub-id-type="doi">10.1038/nrn1325</pub-id><pub-id pub-id-type="pmid">14735115</pub-id></element-citation></ref><ref id="bib159"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>&#352;trumbelj</surname> <given-names>E</given-names></name><name><surname>Kononenko</surname> <given-names>I</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Explaining prediction models and individual predictions with feature contributions</article-title><source>Knowledge and Information Systems</source><volume>41</volume><fpage>647</fpage><lpage>665</lpage><pub-id pub-id-type="doi">10.1007/s10115-013-0679-x</pub-id></element-citation></ref><ref id="bib160"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Stuttgen</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2019">2019</year><data-title>Mlib - Toolbox for Analyzing Spike Data</data-title><version designator="1.7.0.0">1.7.0.0</version><publisher-name>Mathworks</publisher-name><ext-link ext-link-type="uri" xlink:href="https://www.mathworks.com/matlabcentral/fileexchange/37339-mlib-toolbox-for-analyzing-spike-data">https://www.mathworks.com/matlabcentral/fileexchange/37339-mlib-toolbox-for-analyzing-spike-data</ext-link></element-citation></ref><ref id="bib161"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sun</surname> <given-names>SH</given-names></name><name><surname>Almasi</surname> <given-names>A</given-names></name><name><surname>Yunzab</surname> <given-names>M</given-names></name><name><surname>Zehra</surname> <given-names>S</given-names></name><name><surname>Hicks</surname> 
<given-names>DG</given-names></name><name><surname>Kameneva</surname> <given-names>T</given-names></name><name><surname>Ibbotson</surname> <given-names>MR</given-names></name><name><surname>Meffin</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2021">2021</year><article-title>Analysis of extracellular spike waveforms and associated receptive fields of neurons in cat primary visual cortex</article-title><source>The Journal of Physiology</source><volume>599</volume><fpage>2211</fpage><lpage>2238</lpage><pub-id pub-id-type="doi">10.1113/JP280844</pub-id><pub-id pub-id-type="pmid">33501669</pub-id></element-citation></ref><ref id="bib162"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tasic</surname> <given-names>B</given-names></name><name><surname>Yao</surname> <given-names>Z</given-names></name><name><surname>Graybuck</surname> <given-names>LT</given-names></name><name><surname>Smith</surname> <given-names>KA</given-names></name><name><surname>Nguyen</surname> <given-names>TN</given-names></name><name><surname>Bertagnolli</surname> <given-names>D</given-names></name><name><surname>Goldy</surname> <given-names>J</given-names></name><name><surname>Garren</surname> <given-names>E</given-names></name><name><surname>Economo</surname> <given-names>MN</given-names></name><name><surname>Viswanathan</surname> <given-names>S</given-names></name><name><surname>Penn</surname> <given-names>O</given-names></name><name><surname>Bakken</surname> <given-names>T</given-names></name><name><surname>Menon</surname> <given-names>V</given-names></name><name><surname>Miller</surname> <given-names>J</given-names></name><name><surname>Fong</surname> <given-names>O</given-names></name><name><surname>Hirokawa</surname> <given-names>KE</given-names></name><name><surname>Lathia</surname> <given-names>K</given-names></name><name><surname>Rimorin</surname> 
<given-names>C</given-names></name><name><surname>Tieu</surname> <given-names>M</given-names></name><name><surname>Larsen</surname> <given-names>R</given-names></name><name><surname>Casper</surname> <given-names>T</given-names></name><name><surname>Barkan</surname> <given-names>E</given-names></name><name><surname>Kroll</surname> <given-names>M</given-names></name><name><surname>Parry</surname> <given-names>S</given-names></name><name><surname>Shapovalova</surname> <given-names>NV</given-names></name><name><surname>Hirschstein</surname> <given-names>D</given-names></name><name><surname>Pendergraft</surname> <given-names>J</given-names></name><name><surname>Sullivan</surname> <given-names>HA</given-names></name><name><surname>Kim</surname> <given-names>TK</given-names></name><name><surname>Szafer</surname> <given-names>A</given-names></name><name><surname>Dee</surname> <given-names>N</given-names></name><name><surname>Groblewski</surname> <given-names>P</given-names></name><name><surname>Wickersham</surname> <given-names>I</given-names></name><name><surname>Cetin</surname> <given-names>A</given-names></name><name><surname>Harris</surname> <given-names>JA</given-names></name><name><surname>Levi</surname> <given-names>BP</given-names></name><name><surname>Sunkin</surname> <given-names>SM</given-names></name><name><surname>Madisen</surname> <given-names>L</given-names></name><name><surname>Daigle</surname> <given-names>TL</given-names></name><name><surname>Looger</surname> <given-names>L</given-names></name><name><surname>Bernard</surname> <given-names>A</given-names></name><name><surname>Phillips</surname> <given-names>J</given-names></name><name><surname>Lein</surname> <given-names>E</given-names></name><name><surname>Hawrylycz</surname> <given-names>M</given-names></name><name><surname>Svoboda</surname> <given-names>K</given-names></name><name><surname>Jones</surname> <given-names>AR</given-names></name><name><surname>Koch</surname> 
<given-names>C</given-names></name><name><surname>Zeng</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Shared and distinct transcriptomic cell types across neocortical Areas</article-title><source>Nature</source><volume>563</volume><fpage>72</fpage><lpage>78</lpage><pub-id pub-id-type="doi">10.1038/s41586-018-0654-5</pub-id><pub-id pub-id-type="pmid">30382198</pub-id></element-citation></ref><ref id="bib163"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tenenbaum</surname> <given-names>JB</given-names></name><name><surname>de Silva</surname> <given-names>V</given-names></name><name><surname>Langford</surname> <given-names>JC</given-names></name></person-group><year iso-8601-date="2000">2000</year><article-title>A global geometric framework for nonlinear dimensionality reduction</article-title><source>Science</source><volume>290</volume><fpage>2319</fpage><lpage>2323</lpage><pub-id pub-id-type="doi">10.1126/science.290.5500.2319</pub-id><pub-id pub-id-type="pmid">11125149</pub-id></element-citation></ref><ref id="bib164"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Terpilowski</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>scikit-posthocs: pairwise multiple comparison tests in Python</article-title><source>Journal of Open Source Software</source><volume>4</volume><elocation-id>1169</elocation-id><pub-id pub-id-type="doi">10.21105/joss.01169</pub-id></element-citation></ref><ref id="bib165"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Th&#233;berge</surname> <given-names>F</given-names></name></person-group><year iso-8601-date="2020">2020</year><data-title>Ensemble-Clustering-for-Graphs</data-title><source>GitHub</source><ext-link ext-link-type="uri" 
xlink:href="https://github.com/ftheberge/Ensemble-Clustering-for-Graphs">https://github.com/ftheberge/Ensemble-Clustering-for-Graphs</ext-link></element-citation></ref><ref id="bib166"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Thura</surname> <given-names>D</given-names></name><name><surname>Cisek</surname> <given-names>P</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Deliberation and commitment in the premotor and primary motor cortex during dynamic decision making</article-title><source>Neuron</source><volume>81</volume><fpage>1401</fpage><lpage>1416</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2014.01.031</pub-id><pub-id pub-id-type="pmid">24656257</pub-id></element-citation></ref><ref id="bib167"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tibshirani</surname> <given-names>R</given-names></name><name><surname>Walther</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2005">2005</year><article-title>Cluster validation by prediction strength</article-title><source>Journal of Computational and Graphical Statistics</source><volume>14</volume><fpage>511</fpage><lpage>528</lpage><pub-id pub-id-type="doi">10.1198/106186005X59243</pub-id></element-citation></ref><ref id="bib168"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Timme</surname> <given-names>NM</given-names></name><name><surname>Lapish</surname> <given-names>C</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>A tutorial for information theory in neuroscience</article-title><source>Eneuro</source><volume>5</volume><elocation-id>ENEURO.0052-18.2018</elocation-id><pub-id pub-id-type="doi">10.1523/ENEURO.0052-18.2018</pub-id><pub-id pub-id-type="pmid">30211307</pub-id></element-citation></ref><ref id="bib169"><element-citation 
publication-type="journal"><person-group person-group-type="author"><name><surname>Tosches</surname> <given-names>MA</given-names></name><name><surname>Yamawaki</surname> <given-names>TM</given-names></name><name><surname>Naumann</surname> <given-names>RK</given-names></name><name><surname>Jacobi</surname> <given-names>AA</given-names></name><name><surname>Tushev</surname> <given-names>G</given-names></name><name><surname>Laurent</surname> <given-names>G</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Evolution of pallium, Hippocampus, and cortical cell types revealed by single-cell transcriptomics in reptiles</article-title><source>Science</source><volume>360</volume><fpage>881</fpage><lpage>888</lpage><pub-id pub-id-type="doi">10.1126/science.aar4237</pub-id><pub-id pub-id-type="pmid">29724907</pub-id></element-citation></ref><ref id="bib170"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trainito</surname> <given-names>C</given-names></name><name><surname>von Nicolai</surname> <given-names>C</given-names></name><name><surname>Miller</surname> <given-names>EK</given-names></name><name><surname>Siegel</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Extracellular spike waveform dissociates four functionally distinct cell classes in primate cortex</article-title><source>Current Biology</source><volume>29</volume><fpage>2973</fpage><lpage>2982</lpage><pub-id pub-id-type="doi">10.1016/j.cub.2019.07.051</pub-id><pub-id pub-id-type="pmid">31447374</pub-id></element-citation></ref><ref id="bib171"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trautmann</surname> <given-names>EM</given-names></name><name><surname>Stavisky</surname> <given-names>SD</given-names></name><name><surname>Lahiri</surname> <given-names>S</given-names></name><name><surname>Ames</surname> 
<given-names>KC</given-names></name><name><surname>Kaufman</surname> <given-names>MT</given-names></name><name><surname>O'Shea</surname> <given-names>DJ</given-names></name><name><surname>Vyas</surname> <given-names>S</given-names></name><name><surname>Sun</surname> <given-names>X</given-names></name><name><surname>Ryu</surname> <given-names>SI</given-names></name><name><surname>Ganguli</surname> <given-names>S</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Accurate estimation of neural population dynamics without spike sorting</article-title><source>Neuron</source><volume>103</volume><fpage>292</fpage><lpage>308</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2019.05.003</pub-id><pub-id pub-id-type="pmid">31171448</pub-id></element-citation></ref><ref id="bib172"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tremblay</surname> <given-names>S</given-names></name><name><surname>Acker</surname> <given-names>L</given-names></name><name><surname>Afraz</surname> <given-names>A</given-names></name><name><surname>Albaugh</surname> <given-names>DL</given-names></name><name><surname>Amita</surname> <given-names>H</given-names></name><name><surname>Andrei</surname> <given-names>AR</given-names></name><name><surname>Angelucci</surname> <given-names>A</given-names></name><name><surname>Aschner</surname> <given-names>A</given-names></name><name><surname>Balan</surname> <given-names>PF</given-names></name><name><surname>Basso</surname> <given-names>MA</given-names></name><name><surname>Benvenuti</surname> <given-names>G</given-names></name><name><surname>Bohlen</surname> <given-names>MO</given-names></name><name><surname>Caiola</surname> <given-names>MJ</given-names></name><name><surname>Calcedo</surname> <given-names>R</given-names></name><name><surname>Cavanaugh</surname> 
<given-names>J</given-names></name><name><surname>Chen</surname> <given-names>Y</given-names></name><name><surname>Chen</surname> <given-names>S</given-names></name><name><surname>Chernov</surname> <given-names>MM</given-names></name><name><surname>Clark</surname> <given-names>AM</given-names></name><name><surname>Dai</surname> <given-names>J</given-names></name><name><surname>Debes</surname> <given-names>SR</given-names></name><name><surname>Deisseroth</surname> <given-names>K</given-names></name><name><surname>Desimone</surname> <given-names>R</given-names></name><name><surname>Dragoi</surname> <given-names>V</given-names></name><name><surname>Egger</surname> <given-names>SW</given-names></name><name><surname>Eldridge</surname> <given-names>MAG</given-names></name><name><surname>El-Nahal</surname> <given-names>HG</given-names></name><name><surname>Fabbrini</surname> <given-names>F</given-names></name><name><surname>Federer</surname> <given-names>F</given-names></name><name><surname>Fetsch</surname> <given-names>CR</given-names></name><name><surname>Fortuna</surname> <given-names>MG</given-names></name><name><surname>Friedman</surname> <given-names>RM</given-names></name><name><surname>Fujii</surname> <given-names>N</given-names></name><name><surname>Gail</surname> <given-names>A</given-names></name><name><surname>Galvan</surname> <given-names>A</given-names></name><name><surname>Ghosh</surname> <given-names>S</given-names></name><name><surname>Gieselmann</surname> <given-names>MA</given-names></name><name><surname>Gulli</surname> <given-names>RA</given-names></name><name><surname>Hikosaka</surname> <given-names>O</given-names></name><name><surname>Hosseini</surname> <given-names>EA</given-names></name><name><surname>Hu</surname> <given-names>X</given-names></name><name><surname>H&#252;er</surname> <given-names>J</given-names></name><name><surname>Inoue</surname> <given-names>KI</given-names></name><name><surname>Janz</surname> 
<given-names>R</given-names></name><name><surname>Jazayeri</surname> <given-names>M</given-names></name><name><surname>Jiang</surname> <given-names>R</given-names></name><name><surname>Ju</surname> <given-names>N</given-names></name><name><surname>Kar</surname> <given-names>K</given-names></name><name><surname>Klein</surname> <given-names>C</given-names></name><name><surname>Kohn</surname> <given-names>A</given-names></name><name><surname>Komatsu</surname> <given-names>M</given-names></name><name><surname>Maeda</surname> <given-names>K</given-names></name><name><surname>Martinez-Trujillo</surname> <given-names>JC</given-names></name><name><surname>Matsumoto</surname> <given-names>M</given-names></name><name><surname>Maunsell</surname> <given-names>JHR</given-names></name><name><surname>Mendoza-Halliday</surname> <given-names>D</given-names></name><name><surname>Monosov</surname> <given-names>IE</given-names></name><name><surname>Muers</surname> <given-names>RS</given-names></name><name><surname>Nurminen</surname> <given-names>L</given-names></name><name><surname>Ortiz-Rios</surname> <given-names>M</given-names></name><name><surname>O'Shea</surname> <given-names>DJ</given-names></name><name><surname>Palfi</surname> <given-names>S</given-names></name><name><surname>Petkov</surname> <given-names>CI</given-names></name><name><surname>Pojoga</surname> <given-names>S</given-names></name><name><surname>Rajalingham</surname> <given-names>R</given-names></name><name><surname>Ramakrishnan</surname> <given-names>C</given-names></name><name><surname>Remington</surname> <given-names>ED</given-names></name><name><surname>Revsine</surname> <given-names>C</given-names></name><name><surname>Roe</surname> <given-names>AW</given-names></name><name><surname>Sabes</surname> <given-names>PN</given-names></name><name><surname>Saunders</surname> <given-names>RC</given-names></name><name><surname>Scherberger</surname> <given-names>H</given-names></name><name><surname>Schmid</surname> 
<given-names>MC</given-names></name><name><surname>Schultz</surname> <given-names>W</given-names></name><name><surname>Seidemann</surname> <given-names>E</given-names></name><name><surname>Senova</surname> <given-names>YS</given-names></name><name><surname>Shadlen</surname> <given-names>MN</given-names></name><name><surname>Sheinberg</surname> <given-names>DL</given-names></name><name><surname>Siu</surname> <given-names>C</given-names></name><name><surname>Smith</surname> <given-names>Y</given-names></name><name><surname>Solomon</surname> <given-names>SS</given-names></name><name><surname>Sommer</surname> <given-names>MA</given-names></name><name><surname>Spudich</surname> <given-names>JL</given-names></name><name><surname>Stauffer</surname> <given-names>WR</given-names></name><name><surname>Takada</surname> <given-names>M</given-names></name><name><surname>Tang</surname> <given-names>S</given-names></name><name><surname>Thiele</surname> <given-names>A</given-names></name><name><surname>Treue</surname> <given-names>S</given-names></name><name><surname>Vanduffel</surname> <given-names>W</given-names></name><name><surname>Vogels</surname> <given-names>R</given-names></name><name><surname>Whitmire</surname> <given-names>MP</given-names></name><name><surname>Wichmann</surname> <given-names>T</given-names></name><name><surname>Wurtz</surname> <given-names>RH</given-names></name><name><surname>Xu</surname> <given-names>H</given-names></name><name><surname>Yazdan-Shahmorad</surname> <given-names>A</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name><name><surname>DiCarlo</surname> <given-names>JJ</given-names></name><name><surname>Platt</surname> <given-names>ML</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>An open resource for Non-human primate optogenetics</article-title><source>Neuron</source><volume>108</volume><fpage>1075</fpage><lpage>1090</lpage><pub-id 
pub-id-type="doi">10.1016/j.neuron.2020.09.027</pub-id><pub-id pub-id-type="pmid">33080229</pub-id></element-citation></ref><ref id="bib173"><element-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Verleysen</surname> <given-names>M</given-names></name><name><surname>Francois</surname> <given-names>D</given-names></name><name><surname>Simon</surname> <given-names>G</given-names></name><name><surname>Wertz</surname> <given-names>V</given-names></name></person-group><year iso-8601-date="2003">2003</year><article-title><italic>Artificial neural nets problem solving methods,</italic> </article-title><conf-name>7th International Work-Conference on Artificial and Natural Neural Networks</conf-name><fpage>105</fpage><lpage>112</lpage><pub-id pub-id-type="doi">10.1007/3-540-44869-1</pub-id></element-citation></ref><ref id="bib174"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vigneswaran</surname> <given-names>G</given-names></name><name><surname>Kraskov</surname> <given-names>A</given-names></name><name><surname>Lemon</surname> <given-names>RN</given-names></name></person-group><year iso-8601-date="2011">2011</year><article-title>Large identified pyramidal cells in macaque motor and premotor cortex exhibit "thin spikes": implications for cell type classification</article-title><source>Journal of Neuroscience</source><volume>31</volume><fpage>14235</fpage><lpage>14242</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.3142-11.2011</pub-id><pub-id pub-id-type="pmid">21976508</pub-id></element-citation></ref><ref id="bib175"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Viskontas</surname> <given-names>IV</given-names></name><name><surname>Ekstrom</surname> <given-names>AD</given-names></name><name><surname>Wilson</surname> <given-names>CL</given-names></name><name><surname>Fried</surname> 
<given-names>I</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>Characterizing interneuron and pyramidal cells in the human medial temporal lobe in vivo using extracellular recordings</article-title><source>Hippocampus</source><volume>17</volume><fpage>49</fpage><lpage>57</lpage><pub-id pub-id-type="doi">10.1002/hipo.20241</pub-id><pub-id pub-id-type="pmid">17143903</pub-id></element-citation></ref><ref id="bib176"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vormstein-Schneider</surname> <given-names>D</given-names></name><name><surname>Lin</surname> <given-names>JD</given-names></name><name><surname>Pelkey</surname> <given-names>KA</given-names></name><name><surname>Chittajallu</surname> <given-names>R</given-names></name><name><surname>Guo</surname> <given-names>B</given-names></name><name><surname>Arias-Garcia</surname> <given-names>MA</given-names></name><name><surname>Allaway</surname> <given-names>K</given-names></name><name><surname>Sakopoulos</surname> <given-names>S</given-names></name><name><surname>Schneider</surname> <given-names>G</given-names></name><name><surname>Stevenson</surname> <given-names>O</given-names></name><name><surname>Vergara</surname> <given-names>J</given-names></name><name><surname>Sharma</surname> <given-names>J</given-names></name><name><surname>Zhang</surname> <given-names>Q</given-names></name><name><surname>Franken</surname> <given-names>TP</given-names></name><name><surname>Smith</surname> <given-names>J</given-names></name><name><surname>Ibrahim</surname> <given-names>LA</given-names></name><name><surname>Mastro</surname> <given-names>KJ</given-names></name><name><surname>Sabri</surname> <given-names>E</given-names></name><name><surname>Huang</surname> <given-names>S</given-names></name><name><surname>Favuzzi</surname> <given-names>E</given-names></name><name><surname>Burbridge</surname>
<given-names>T</given-names></name><name><surname>Xu</surname> <given-names>Q</given-names></name><name><surname>Guo</surname> <given-names>L</given-names></name><name><surname>Vogel</surname> <given-names>I</given-names></name><name><surname>Sanchez</surname> <given-names>V</given-names></name><name><surname>Saldi</surname> <given-names>GA</given-names></name><name><surname>Gorissen</surname> <given-names>BL</given-names></name><name><surname>Yuan</surname> <given-names>X</given-names></name><name><surname>Zaghloul</surname> <given-names>KA</given-names></name><name><surname>Devinsky</surname> <given-names>O</given-names></name><name><surname>Sabatini</surname> <given-names>BL</given-names></name><name><surname>Batista-Brito</surname> <given-names>R</given-names></name><name><surname>Reynolds</surname> <given-names>J</given-names></name><name><surname>Feng</surname> <given-names>G</given-names></name><name><surname>Fu</surname> <given-names>Z</given-names></name><name><surname>McBain</surname> <given-names>CJ</given-names></name><name><surname>Fishell</surname> <given-names>G</given-names></name><name><surname>Dimidschstein</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Viral manipulation of functionally distinct interneurons in mice, non-human primates and humans</article-title><source>Nature Neuroscience</source><volume>23</volume><fpage>1629</fpage><lpage>1636</lpage><pub-id pub-id-type="doi">10.1038/s41593-020-0692-9</pub-id><pub-id pub-id-type="pmid">32807948</pub-id></element-citation></ref><ref id="bib177"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>J</given-names></name><name><surname>Narain</surname> <given-names>D</given-names></name><name><surname>Hosseini</surname> <given-names>EA</given-names></name><name><surname>Jazayeri</surname> <given-names>M</given-names></name></person-group><year 
iso-8601-date="2018">2018</year><article-title>Flexible timing by temporal scaling of cortical responses</article-title><source>Nature Neuroscience</source><volume>21</volume><fpage>102</fpage><lpage>110</lpage><pub-id pub-id-type="doi">10.1038/s41593-017-0028-6</pub-id><pub-id pub-id-type="pmid">29203897</pub-id></element-citation></ref><ref id="bib178"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Weir</surname> <given-names>K</given-names></name><name><surname>Blanquie</surname> <given-names>O</given-names></name><name><surname>Kilb</surname> <given-names>W</given-names></name><name><surname>Luhmann</surname> <given-names>HJ</given-names></name><name><surname>Sinning</surname> <given-names>A</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>Comparison of spike parameters from optically identified GABAergic and glutamatergic neurons in sparse cortical cultures</article-title><source>Frontiers in Cellular Neuroscience</source><volume>8</volume><elocation-id>460</elocation-id><pub-id pub-id-type="doi">10.3389/fncel.2014.00460</pub-id><pub-id pub-id-type="pmid">25642167</pub-id></element-citation></ref><ref id="bib179"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Williams</surname> <given-names>AH</given-names></name><name><surname>Kim</surname> <given-names>TH</given-names></name><name><surname>Wang</surname> <given-names>F</given-names></name><name><surname>Vyas</surname> <given-names>S</given-names></name><name><surname>Ryu</surname> <given-names>SI</given-names></name><name><surname>Shenoy</surname> <given-names>KV</given-names></name><name><surname>Schnitzer</surname> <given-names>M</given-names></name><name><surname>Kolda</surname> <given-names>TG</given-names></name><name><surname>Ganguli</surname> <given-names>S</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>Unsupervised 
discovery of demixed, Low-Dimensional neural dynamics across multiple timescales through tensor component analysis</article-title><source>Neuron</source><volume>98</volume><fpage>1099</fpage><lpage>1115</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2018.05.015</pub-id><pub-id pub-id-type="pmid">29887338</pub-id></element-citation></ref><ref id="bib180"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>L</given-names></name><name><surname>Jin</surname> <given-names>R</given-names></name></person-group><year iso-8601-date="2006">2006</year><source>Distance Metric Learning: A Comprehensive Survey</source><publisher-name>Michigan State University</publisher-name></element-citation></ref><ref id="bib181"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zaitsev</surname> <given-names>AV</given-names></name><name><surname>Gonzalez-Burgos</surname> <given-names>G</given-names></name><name><surname>Povysheva</surname> <given-names>NV</given-names></name><name><surname>Kr&#246;ner</surname> <given-names>S</given-names></name><name><surname>Lewis</surname> <given-names>DA</given-names></name><name><surname>Krimer</surname> <given-names>LS</given-names></name></person-group><year iso-8601-date="2005">2005</year><article-title>Localization of calcium-binding proteins in physiologically and morphologically characterized interneurons of monkey dorsolateral prefrontal cortex</article-title><source>Cerebral Cortex</source><volume>15</volume><fpage>1178</fpage><lpage>1186</lpage><pub-id pub-id-type="doi">10.1093/cercor/bhh218</pub-id><pub-id pub-id-type="pmid">15590911</pub-id></element-citation></ref><ref id="bib182"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zaitsev</surname> <given-names>AV</given-names></name><name><surname>Povysheva</surname> 
<given-names>NV</given-names></name><name><surname>Gonzalez-Burgos</surname> <given-names>G</given-names></name><name><surname>Rotaru</surname> <given-names>D</given-names></name><name><surname>Fish</surname> <given-names>KN</given-names></name><name><surname>Krimer</surname> <given-names>LS</given-names></name><name><surname>Lewis</surname> <given-names>DA</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Interneuron diversity in layers 2-3 of monkey prefrontal cortex</article-title><source>Cerebral Cortex</source><volume>19</volume><fpage>1597</fpage><lpage>1615</lpage><pub-id pub-id-type="doi">10.1093/cercor/bhn198</pub-id><pub-id pub-id-type="pmid">19015370</pub-id></element-citation></ref><ref id="bib183"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>S</given-names></name><name><surname>Xia</surname> <given-names>R</given-names></name><name><surname>Chen</surname> <given-names>X</given-names></name><name><surname>Moore</surname> <given-names>T</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Heterogeneity of neuronal populations within columns of primate V1 revealed by High-Density recordings</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2020.12.22.424048</pub-id></element-citation></ref></ref-list><app-group><app id="appendix-1"><title>Appendix 1</title><boxed-text><sec id="s8" sec-type="appendix"><title>Supplementary Information</title><sec id="s8-1"><title>Clustering in high-dimensionality and the curse of dimensionality</title><p>Clustering in high-dimensions is a difficult problem. In particular, &#8220;concentration of measure&#8221; results in Euclidean distances, used by k-Means clustering, becoming meaningless as a measure of distance for clustering (<xref ref-type="bibr" rid="bib1">Aggarwal et al., 2001</xref>). 
Specifically, as dimensionality increases, the distances between randomly chosen points all converge to the same constant distance (<xref ref-type="bibr" rid="bib173">Verleysen et al., 2003</xref>; <xref ref-type="bibr" rid="bib20">Beyer et al., 1999</xref>). UMAP counters this by using graph distances on a nearest neighbor graph which is not susceptible to the concentration of measure phenomenon (<xref ref-type="bibr" rid="bib107">McInnes, 2019</xref>). A common strategy is to cluster in some low-dimensional projected space with a dimensionality reduction method that preserves important latent structure. These approaches use clustering methods like a Gaussian mixture model (GMM) or k-Means but clusters in low-dimension are not necessarily Gaussian in shape (an assumption of both the GMM and k-Means). This violation is induced by the perturbations introduced by non-linear projections even if the distributions were Gaussian in the original high-dimensional space.</p></sec><sec id="s8-2"><title>UMAP dimensionality reduction</title><p>UMAP is among the class of non-linear dimensionality reductions known as manifold learning algorithms which also includes other well-known methods in neuroscience such as Isomap (<xref ref-type="bibr" rid="bib163">Tenenbaum et al., 2000</xref>) and t-SNE (<xref ref-type="bibr" rid="bib99">Maaten and Hinton, 2008</xref>) (see <xref ref-type="bibr" rid="bib92">Lee and Verleysen, 2007</xref> for a review of methods). Key to this algorithm is the presumption that although data may not be uniformly spaced in the ambient space, it is uniform on some low-dimensional manifold embedded within the high-dimensional space. It is also assumed that the underlying manifold is locally connected (i.e. doesn&#8217;t have any breaks or isolated points) which is satisfied by the local distance metric unit ball extending to the 1st-nearest neighbor (<xref ref-type="fig" rid="fig2">Figure 2A-ii.b</xref>).
This leads to the conclusion that the underlying notion of distance (Riemannian metric) changes in each region of the manifold: the notion of a unit distance &#8220;stretches&#8221; in areas of sparser density and &#8220;shortens&#8221; in areas of higher density. This is formalized beginning with defining how a local Riemannian metric should be constructed by Lemma 1 of <xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref>:</p><sec id="s8-2-1"><title>Lemma 1</title><p>Let <inline-formula><mml:math id="inf53"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> be a Riemannian manifold equipped with a metric <inline-formula><mml:math id="inf54"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>g</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> in ambient space <inline-formula><mml:math id="inf55"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msup><mml:mrow><mml:mi mathvariant="double-struck">R</mml:mi></mml:mrow><mml:mi>n</mml:mi></mml:msup></mml:mrow></mml:mstyle></mml:math></inline-formula>. Let <inline-formula><mml:math id="inf56"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>p</mml:mi><mml:mo>&#8712;</mml:mo><mml:mi>M</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> be a point in this space. 
If <inline-formula><mml:math id="inf57"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>g</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> is locally constant about the point <inline-formula><mml:math id="inf58"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> in an open neighborhood <inline-formula><mml:math id="inf59"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>U</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> such that <inline-formula><mml:math id="inf60"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>g</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> is a constant diagonal matrix in diagonal coordinates, then in a ball <inline-formula><mml:math id="inf61"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>B</mml:mi><mml:mo>&#8838;</mml:mo><mml:mi>U</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> centered at <inline-formula><mml:math id="inf62"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> with a volume <inline-formula><mml:math id="inf63"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:msup><mml:mi>&#960;</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mi mathvariant="normal">&#915;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></inline-formula> with respect to <inline-formula><mml:math id="inf64"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>g</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula>, the geodesic distance from <inline-formula><mml:math 
id="inf65"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> to any point <inline-formula><mml:math id="inf66"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>q</mml:mi><mml:mo>&#8712;</mml:mo><mml:mi>B</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> is <inline-formula><mml:math id="inf67"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>r</mml:mi></mml:mfrac><mml:mi>d</mml:mi><mml:msup><mml:mrow><mml:mi mathvariant="double-struck">R</mml:mi></mml:mrow><mml:mi>n</mml:mi></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mi>p</mml:mi><mml:mo>,</mml:mo><mml:mi>q</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula>, where <inline-formula><mml:math id="inf68"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> is the radius of the ball in the ambient space and <inline-formula><mml:math id="inf69"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:msup><mml:mrow><mml:mi mathvariant="double-struck">R</mml:mi></mml:mrow><mml:mi>n</mml:mi></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mi>p</mml:mi><mml:mo>,</mml:mo><mml:mi>q</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula> is the existing metric on the ambient space.</p><p>Using this definition of <inline-formula><mml:math id="inf70"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>g</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula>, each ball <inline-formula><mml:math id="inf71"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> of fixed volume (using the
manifold&#8217;s distance metric) should contain the same number of data points in <inline-formula><mml:math id="inf72"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> regardless of where on the manifold <inline-formula><mml:math id="inf73"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> is located. This also implies that a ball centered on data point <inline-formula><mml:math id="inf74"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mstyle></mml:math></inline-formula> should contain the k-nearest neighbors of <inline-formula><mml:math id="inf75"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mstyle></mml:math></inline-formula> in a fixed volume no matter which <inline-formula><mml:math id="inf76"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8712;</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> is chosen. 
Thus the geodesic distance around each data point is normalized by its distance to its k-nearest neighbor and the assumption of uniform sampling on the manifold is enforced.</p><p>To compensate for the impact of certain nearest neighbors in the ball lying much further than those closer by (as in very sparse regions in ambient space), the normalizing distances are transformed by the exponential function (<xref ref-type="fig" rid="fig2">Figure 2A-ii.c</xref>),<disp-formula id="equ5"><mml:math id="m5"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:munderover><mml:mo movablelimits="false">&#8721;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:munderover><mml:mrow><mml:mi mathvariant="normal">e</mml:mi><mml:mi mathvariant="normal">x</mml:mi><mml:mi mathvariant="normal">p</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mrow><mml:mo>&#8722;</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8722;</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfrac><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="normal">l</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">g</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mstyle></mml:math></disp-formula></p><p>To unite these disparate metric spaces (each data point has a unique local notion of distance), category theory is used to show that an equivalent representation can be made from a fuzzy simplicial set via an adjunction that will not be defined here (see Section 3 
of <xref ref-type="bibr" rid="bib106">McInnes et al., 2018</xref> and the Nerve Theorem). In this way, the topological structure of the data can be represented as a metric space or as the union of a set of fuzzy simplices (<xref ref-type="fig" rid="fig2">Figure 2A-ii.e</xref>). One large benefit of this construction is that while normalized distances in high-dimensional spaces suffer from the Curse of Dimensionality in the form of concentration of measure, normalized nearest neighbor distances do not (<xref ref-type="bibr" rid="bib107">McInnes, 2019</xref>).&#160;The end result of this process is an approximation of the topology of the manifold by fuzzy simplicial sets in the form of a &#268;ech complex.</p><p>With this fuzzy topological representation, the low-dimensional representation can be found through an optimization procedure that minimizes the cross-entropy of fuzzy simplicial sets containing the same objects and implemented with a force directed graph layout procedure (<xref ref-type="fig" rid="fig2">Figure 2B-v.a</xref>). 
Given two fuzzy sets with the same members A and separate membership strength functions &#181; and &#957; of Spivak&#8217;s characteristic form&#160;<xref ref-type="bibr" rid="bib156">Spivak, 2009</xref>, the cross-entropy <inline-formula><mml:math id="inf77"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>C</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>A</mml:mi><mml:mo>,</mml:mo><mml:mi>&#956;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>A</mml:mi><mml:mo>,</mml:mo><mml:mi>&#957;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mstyle></mml:math></inline-formula> is defined as,<disp-formula id="equ6"><mml:math id="m6"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>C</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>A</mml:mi><mml:mo>,</mml:mo><mml:mi>&#956;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>A</mml:mi><mml:mo>,</mml:mo><mml:mi>&#957;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mover><mml:mo>=</mml:mo><mml:mi mathvariant="normal">&#916;</mml:mi></mml:mover><mml:munder><mml:mo movablelimits="false">&#8721;</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>&#8712;</mml:mo><mml:mi>A</mml:mi></mml:mrow></mml:munder><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#956;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mi>log</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>&#956;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mi>&#957;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mfrac></mml:mrow><mml:mo>+</mml:mo><mml:mo 
stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#8722;</mml:mo><mml:mi>&#956;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mi>log</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8722;</mml:mo><mml:mi>&#956;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8722;</mml:mo><mml:mi>&#957;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:mstyle></mml:math></disp-formula></p><p>The first term <inline-formula><mml:math id="inf78"><mml:mrow><mml:mi>&#956;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mi>log</mml:mi><mml:mo>&#8289;</mml:mo><mml:mfrac><mml:mrow><mml:mi>&#956;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>&#957;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:math></inline-formula> captures the attractive force minimised if short edges in high-dimension correspond to short edges in low-dimension and <inline-formula><mml:math id="inf79"><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mrow><mml:mi>&#956;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mi>log</mml:mi><mml:mo>&#8289;</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mrow><mml:mi>&#956;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mrow><mml:mi>&#957;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:math></inline-formula> is the repulsive forces that are minimised if long edges in high-dimension correspond to short edges in low-dimension or vice versa.</p><p>From a computational perspective, this whole UMAP process proceeds in two steps: construction of a k-nearest neighbor graph (<xref ref-type="fig" rid="fig2">Figure 2A</xref>) and layout of the graph (<xref ref-type="fig" rid="fig2">Figure 2B</xref>) into a low-dimensional manifold. Note that after the first step, the <inline-formula><mml:math id="inf80"><mml:mi>k</mml:mi></mml:math></inline-formula>-nearest neighbor graph is passed to Louvain community detection (<xref ref-type="fig" rid="fig2">Figure 2B-iv</xref>) and thus the clustering is not dependent on the embedding of the graph, just on its construction and associated UMAP parameters such as n_neighbors and metric but not layout parameters such as min_dist. The embedding <italic>is</italic> however used for visualization and interpretability and for consistency, the random seed for UMAP&#8217;s layout procedure is set (UMAP will use gradient descent vs. 
stochastic gradient descent to compute the force directed graph layout).</p></sec><sec id="s8-2-2"><title>Graph construction</title><p>Given a set of data points <inline-formula><mml:math id="inf81"><mml:mrow><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">{</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#8230;</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>N</mml:mi></mml:msub><mml:mo stretchy="false">}</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> and a metric <inline-formula><mml:math id="inf82"><mml:mi>d</mml:mi></mml:math></inline-formula>, the construction of an undirected weighted <inline-formula><mml:math id="inf83"><mml:mi>k</mml:mi></mml:math></inline-formula>-nearest neighbor graph (captured by an adjacency matrix capturing the connection weights between nodes) is conducted using a nearest neighbor descent algorithm (<xref ref-type="bibr" rid="bib49">Dong et al., 2011</xref>). 
For each data point <inline-formula><mml:math id="inf84"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8712;</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:math></inline-formula> and fixed nearest neighbor hyperparameter <inline-formula><mml:math id="inf85"><mml:mi>k</mml:mi></mml:math></inline-formula>, we have the set <inline-formula><mml:math id="inf86"><mml:mrow><mml:mo stretchy="false">{</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:msub><mml:mo>&#8290;</mml:mo><mml:mi mathvariant="normal">&#8230;</mml:mi><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:msub></mml:mrow><mml:mo stretchy="false">}</mml:mo></mml:mrow></mml:math></inline-formula> the set of <inline-formula><mml:math id="inf87"><mml:mi>k</mml:mi></mml:math></inline-formula>-nearest neighbors of <italic>x</italic><sub><italic>i</italic></sub> under the local Riemannian metric <inline-formula><mml:math id="inf88"><mml:mi>d</mml:mi></mml:math></inline-formula>. 
We define <inline-formula><mml:math id="inf89"><mml:msub><mml:mi>&#961;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="inf90"><mml:msub><mml:mi>&#963;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> such that,<disp-formula id="equ7"><mml:math id="m7"><mml:mrow><mml:msub><mml:mi>&#961;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mi>min</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo stretchy="false">{</mml:mo><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo fence="true" stretchy="false">|</mml:mo><mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8804;</mml:mo><mml:mi>j</mml:mi><mml:mo>&#8804;</mml:mo><mml:mi>k</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>&gt;</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo stretchy="false">}</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>and setting <inline-formula><mml:math id="inf91"><mml:msub><mml:mi>&#963;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> such that,<disp-formula id="equ8"><mml:math id="m8"><mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>log</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>k</mml:mi><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>k</mml:mi></mml:munderover></mml:mstyle><mml:mrow><mml:mi>exp</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo maxsize="160%" minsize="160%">(</mml:mo><mml:mstyle displaystyle="true"><mml:mfrac><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mi>max</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo maxsize="120%" minsize="120%">(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:msub><mml:mi>&#961;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo maxsize="120%" minsize="120%">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:msub><mml:mi>&#963;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfrac></mml:mstyle><mml:mo maxsize="160%" minsize="160%">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula></p><p>The weighted graph <inline-formula><mml:math id="inf92"><mml:mrow><mml:mover accent="true"><mml:mi>G</mml:mi><mml:mo>&#175;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>V</mml:mi><mml:mo>,</mml:mo><mml:mi>E</mml:mi><mml:mo>,</mml:mo><mml:mi>w</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is defined in terms of the vertices <inline-formula><mml:math id="inf93"><mml:mi>V</mml:mi></mml:math></inline-formula>, edges <inline-formula><mml:math 
id="inf94"><mml:mrow><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">{</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">|</mml:mo><mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8804;</mml:mo><mml:mi>j</mml:mi><mml:mo>&#8804;</mml:mo><mml:mi>k</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#8804;</mml:mo><mml:mi>i</mml:mi><mml:mo>&#8804;</mml:mo><mml:mi>N</mml:mi></mml:mrow></mml:mrow><mml:mo stretchy="false">}</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula>, and weight function <inline-formula><mml:math id="inf95"><mml:mi>w</mml:mi></mml:math></inline-formula> as,<disp-formula id="equ9"><mml:math id="m9"><mml:mrow><mml:mrow><mml:mi>w</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>k</mml:mi></mml:munderover></mml:mstyle><mml:mrow><mml:mi>exp</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo maxsize="160%" minsize="160%">(</mml:mo><mml:mstyle displaystyle="true"><mml:mfrac><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mi>max</mml:mi><mml:mo>&#8289;</mml:mo><mml:mrow><mml:mo maxsize="120%" 
minsize="120%">(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:msub><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:msub><mml:mi>&#961;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo maxsize="120%" minsize="120%">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:msub><mml:mi>&#963;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfrac></mml:mstyle><mml:mo maxsize="160%" minsize="160%">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>.</p><p>If <inline-formula><mml:math id="inf96"><mml:mi>A</mml:mi></mml:math></inline-formula> is the weighted adjacency matrix of <inline-formula><mml:math id="inf97"><mml:mover accent="true"><mml:mi>G</mml:mi><mml:mo>&#175;</mml:mo></mml:mover></mml:math></inline-formula>, we can get the undirected weighted graph <inline-formula><mml:math id="inf98"><mml:mi>B</mml:mi></mml:math></inline-formula> by the relationship,<disp-formula id="equ10"><mml:math id="m10"><mml:mrow><mml:mi>B</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mi>A</mml:mi><mml:mo>+</mml:mo><mml:msup><mml:mi>A</mml:mi><mml:mo>&#8890;</mml:mo></mml:msup></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mi>A</mml:mi><mml:mo>&#8728;</mml:mo><mml:msup><mml:mi>A</mml:mi><mml:mo>&#8890;</mml:mo></mml:msup></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where &#8728; is the Hadamard product.</p></sec><sec id="s8-2-3"><title>Graph layout</title><p>The UMAP algorithm finds a low-dimensional projection (manifold) of the high-dimensional data by a force directed layout of the constructed graph. 
Before this is done though, the graph is spectrally embedded to aid in consistency and convergence of the algorithm through initialization (<xref ref-type="bibr" rid="bib17">Belkin and Niyogi, 2002</xref>; <xref ref-type="bibr" rid="bib19">Bengio et al., 2003</xref>; <xref ref-type="bibr" rid="bib85">Kobak and Linderman, 2019</xref>). The symmetric normalized Laplacian <inline-formula><mml:math id="inf99"><mml:mi>L</mml:mi></mml:math></inline-formula> of the graph is calculated for the 1-skeleton of the weighted graph which is analogous to the Laplace-Beltrami operator (divergence of the gradient, <inline-formula><mml:math id="inf100"><mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#916;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>f</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msup><mml:mo>&#8711;</mml:mo><mml:mn>2</mml:mn></mml:msup><mml:mo>&#8289;</mml:mo><mml:mi>f</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula>) on a manifold. If <inline-formula><mml:math id="inf101"><mml:mi>D</mml:mi></mml:math></inline-formula> is the degree matrix (a diagonal matrix containing the degree of each vertex) of the adjacency matrix <inline-formula><mml:math id="inf102"><mml:mi>A</mml:mi></mml:math></inline-formula>, we compute the Laplacian matrix as,<disp-formula id="equ11"><mml:math id="m11"><mml:mrow><mml:mi>L</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:msup><mml:mi>D</mml:mi><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:msup><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mo>-</mml:mo><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:msup><mml:mi>D</mml:mi><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:msup></mml:mrow></mml:mrow></mml:math></disp-formula>with associated eigenvectors <inline-formula><mml:math id="inf103"><mml:mi>y</mml:mi></mml:math></inline-formula> and eigenvalues &#955;,<disp-formula 
id="equ12"><mml:math id="m12"><mml:mrow><mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>y</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi>&#955;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>D</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula></p><p>After the spectral embedding of the graph with Laplacian eigenmaps, the force directed graph layout iteratively applies attractive and repulsive forces on the edges and vertices. The attractive force between two vertices <inline-formula><mml:math id="inf104"><mml:mi>i</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf105"><mml:mi>j</mml:mi></mml:math></inline-formula> at coordinates <inline-formula><mml:math id="inf106"><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="inf107"><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:math></inline-formula> with tunable hyperparameters <inline-formula><mml:math id="inf108"><mml:mi>a</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf109"><mml:mi>b</mml:mi></mml:math></inline-formula> and is determined by,<disp-formula id="equ13"><mml:math id="m13"><mml:mrow><mml:mstyle displaystyle="true"><mml:mfrac><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>a</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>b</mml:mi><mml:mo>&#8290;</mml:mo><mml:msubsup><mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo><mml:mrow><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:mi>b</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msubsup></mml:mrow></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo><mml:mrow><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mn>2</mml:mn></mml:msubsup></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>&#8290;</mml:mo><mml:mi>w</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="120%" minsize="120%">(</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo maxsize="120%" minsize="120%">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula>and the repulsive forces with hyper-parameter <inline-formula><mml:math id="inf110"><mml:mrow><mml:mi>&#1013;</mml:mi><mml:mo>=</mml:mo><mml:mn>0.001</mml:mn></mml:mrow></mml:math></inline-formula> to prevent division by zero,<disp-formula id="equ14"><mml:math id="m14"><mml:mrow><mml:mrow><mml:mstyle displaystyle="true"><mml:mfrac><mml:mi>b</mml:mi><mml:mrow><mml:mrow><mml:mo maxsize="120%" minsize="120%">(</mml:mo><mml:mrow><mml:mi>&#1013;</mml:mi><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mo fence="true" 
stretchy="false">&#8741;</mml:mo><mml:mrow><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo maxsize="120%" minsize="120%">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="120%" minsize="120%">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo><mml:mrow><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo fence="true" stretchy="false">&#8741;</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo maxsize="120%" minsize="120%">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="160%" minsize="160%">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mrow><mml:mi>w</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="120%" minsize="120%">(</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo maxsize="120%" minsize="120%">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo maxsize="160%" minsize="160%">)</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mtext>&#119858;</mml:mtext><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula></p><p>This optimization 
procedure is then completed using stochastic gradient descent to arrive at the final embedding.</p></sec></sec><sec id="s8-3"><title>Louvain method for community detection</title><p>The Louvain method for community detection (here called clustering) (<xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>) operates on a weighted network and locates highly-interconnected nodes called a community. This &#8216;connectedness&#8217; is measured by their modularity <inline-formula><mml:math id="inf111"><mml:mi>Q</mml:mi></mml:math></inline-formula> (taking real values between &#8722;1 and 1 inclusive) with added resolution parameter <inline-formula><mml:math id="inf112"><mml:mi>t</mml:mi></mml:math></inline-formula>&#160;(<xref ref-type="bibr" rid="bib122">Newman and Girvan, 2004</xref>; <xref ref-type="bibr" rid="bib89">Lambiotte, 2007</xref>) defined as,<disp-formula id="equ15"><mml:math id="m15"><mml:mrow><mml:msub><mml:mi>Q</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munder><mml:mo largeop="true" movablelimits="false" symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mrow><mml:mrow><mml:mo maxsize="160%" minsize="160%">[</mml:mo><mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>-</mml:mo><mml:mstyle 
displaystyle="true"><mml:mfrac><mml:mrow><mml:msub><mml:mi>k</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle></mml:mrow><mml:mo maxsize="160%" minsize="160%">]</mml:mo></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mi>&#948;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where <inline-formula><mml:math id="inf113"><mml:mi>t</mml:mi></mml:math></inline-formula> is a parameter controlling the &#8216;characteristic scale&#8217; of the communities (<xref ref-type="bibr" rid="bib90">Lambiotte et al., 2008</xref>). The larger the resolution parameter, the fewer the number of communities and the larger their size (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>). Smaller values of the resolution parameter result in more communities of smaller size. Note that when <inline-formula><mml:math id="inf114"><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:math></inline-formula>, the simplified definition of modularity is given as described&#160;in <xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>. 
<inline-formula><mml:math id="inf115"><mml:msub><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is an adjacency matrix with the weights of the edges between the nodes indexed by <inline-formula><mml:math id="inf116"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula> and <inline-formula><mml:math id="inf117"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula>. <italic>k</italic><sub><italic>i</italic></sub> is the sum of weights of the edges connected to the node <inline-formula><mml:math id="inf118"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula>. <italic>c</italic><sub><italic>i</italic></sub> is the community to which the node <inline-formula><mml:math id="inf119"><mml:mi>i</mml:mi></mml:math></inline-formula> belongs, and the function <inline-formula><mml:math id="inf120"><mml:mrow><mml:mi>&#948;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is an indicator function that is 1 if <inline-formula><mml:math id="inf121"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mstyle></mml:math></inline-formula> and 0 otherwise. 
The value <inline-formula><mml:math id="inf122"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:munder><mml:mo>&#8721;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msub><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mstyle></mml:math></inline-formula> is the sum of all the weights of all the edges in the network. This equation also serves as an objective function for the iterative procedure in <xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>, which proceeds in two steps: modularity optimization and community aggregation.</p><sec id="s8-3-1"><title>Modularity Optimization</title><p>Each node is assigned to its own singleton community (in the initialization step for only the first pass) and then each node is moved into a community with a random neighbor and the change in modularity, <inline-formula><mml:math id="inf123"><mml:mrow><mml:mi mathvariant="normal">&#916;</mml:mi><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>Q</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>, is calculated.
The equation for this change in modularity is,<disp-formula id="equ16"><mml:math id="m16"><mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#916;</mml:mi><mml:mo>&#8290;</mml:mo><mml:msub><mml:mi>Q</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mo maxsize="260%" minsize="260%">[</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:mfrac><mml:mrow><mml:msub><mml:mo largeop="true" symmetric="true">&#8721;</mml:mo><mml:mtext>in</mml:mtext></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>in</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>-</mml:mo><mml:mrow><mml:mo maxsize="260%" minsize="260%">(</mml:mo><mml:msup><mml:mstyle displaystyle="true"><mml:mfrac><mml:mrow><mml:msub><mml:mo largeop="true" symmetric="true">&#8721;</mml:mo><mml:mtext>tot</mml:mtext></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle><mml:mn>2</mml:mn></mml:msup><mml:mo maxsize="260%" minsize="260%">)</mml:mo></mml:mrow></mml:mrow><mml:mo maxsize="260%" minsize="260%">]</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo maxsize="260%" minsize="260%">[</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:mfrac><mml:msub><mml:mo largeop="true" symmetric="true">&#8721;</mml:mo><mml:mtext>in</mml:mtext></mml:msub><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>-</mml:mo><mml:mrow><mml:mo maxsize="260%" minsize="260%">(</mml:mo><mml:mstyle displaystyle="true"><mml:mfrac><mml:msub><mml:mo largeop="true" 
symmetric="true">&#8721;</mml:mo><mml:mtext>tot</mml:mtext></mml:msub><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle><mml:mo maxsize="260%" minsize="260%">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mo maxsize="260%" minsize="260%">(</mml:mo><mml:mstyle displaystyle="true"><mml:mfrac><mml:msub><mml:mi>k</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#8290;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mstyle><mml:mo maxsize="260%" minsize="260%">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo maxsize="260%" minsize="260%">]</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>where <inline-formula><mml:math id="inf124"><mml:msub><mml:mo largeop="true" symmetric="true">&#8721;</mml:mo><mml:mtext>in</mml:mtext></mml:msub></mml:math></inline-formula> is the sum of all the weights inside the community that the node <inline-formula><mml:math id="inf125"><mml:mi>i</mml:mi></mml:math></inline-formula> is moving into. <inline-formula><mml:math id="inf126"><mml:msub><mml:mo largeop="true" symmetric="true">&#8721;</mml:mo><mml:mtext>tot</mml:mtext></mml:msub></mml:math></inline-formula> is the sum of all the weights of the edges to nodes in the community <inline-formula><mml:math id="inf127"><mml:mi>i</mml:mi></mml:math></inline-formula> is moving into. <italic>k</italic><sub><italic>i</italic></sub> is the sum of all the weighted edges incident on <inline-formula><mml:math id="inf128"><mml:mi>i</mml:mi></mml:math></inline-formula>. <inline-formula><mml:math id="inf129"><mml:msub><mml:mi>k</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>in</mml:mtext></mml:mrow></mml:msub></mml:math></inline-formula> is the sum of the weights from the edges of <inline-formula><mml:math id="inf130"><mml:mi>i</mml:mi></mml:math></inline-formula> to nodes in the cluster. 
Once the change in modularity is calculated for a node before and after joining a neighboring cluster, the node joins (or stays with) the community with the largest positive increase in modularity; if no increase can be found, the node remains a part of its current community. Once no increase in modularity can be found for any node, the algorithm proceeds to the second step.</p></sec><sec id="s8-3-2"><title>Community Aggregation</title><p>Every node in each community in the previous step is then collapsed into a single node and their edges summed to form a new graph. The process is then repeated from the previous step. This process repeats until the graph with maximum modularity is found and each original node is assigned to a final cluster membership.</p><p>The graph produced by UMAP was passed into this Louvain clustering algorithm (<xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>) using cylouvain 0.2.3 with parameters in <xref ref-type="table" rid="table1">Table 1</xref>. This clustering method requires no prior specification of the number of clusters that should be present but its number does depend on the resolution parameter. To choose this parameter, a sensitivity analysis was conducted across various values of the resolution parameter with the number of communities and total modularity compared (<xref ref-type="fig" rid="fig3">Figure 3B</xref>). Each waveform was then plotted in UMAP space and color-coded to its associated cluster label found by Louvain Clustering (<xref ref-type="fig" rid="fig3">Figure 3A</xref>).</p></sec></sec></sec><sec id="s9" sec-type="appendix"><title>Ensemble clustering for graphs (ECG)</title><p>ECG is a consensus clustering method for graphs and was used to validate the Louvain clustering algorithm (<xref ref-type="bibr" rid="bib132">Poulin and Th&#233;berge, 2018</xref>; <xref ref-type="bibr" rid="bib133">Poulin and Th&#233;berge, 2019</xref>).
ECG consists of two steps: generation and integration.</p><sec id="s9-1"><title>Generation</title><p>This step instantiates the ECG algorithm by using Louvain clustering to produce a randomized set of <inline-formula><mml:math id="inf131"><mml:mi>k</mml:mi></mml:math></inline-formula> level-1 partitions <inline-formula><mml:math id="inf132"><mml:mrow><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi><mml:mo>&#8712;</mml:mo><mml:mrow><mml:mo stretchy="false">{</mml:mo><mml:msub><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi><mml:mn class="ltx_font_mathcaligraphic" mathvariant="script">1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#8230;</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi><mml:mi class="ltx_font_mathcaligraphic">&#120000;</mml:mi></mml:msub><mml:mo stretchy="false">}</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> (the level-1 refers to only computing the first pass of Louvain). The randomization comes from the randomization of vertices in the initialization step of Louvain clustering.</p></sec><sec id="s9-2"><title>Integration</title><p>Once the <inline-formula><mml:math id="inf133"><mml:mi>k</mml:mi></mml:math></inline-formula> randomized level-1 Louvain partitions are obtained, Louvain is run on a weighted version of the initial graph <inline-formula><mml:math id="inf134"><mml:mrow><mml:mi>G</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>V</mml:mi><mml:mo>,</mml:mo><mml:mi>E</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula>. 
These weights <inline-formula><mml:math id="inf135"><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> are obtained via co-association of edges <inline-formula><mml:math id="inf136"><mml:mrow><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#8712;</mml:mo><mml:mi>E</mml:mi></mml:mrow></mml:math></inline-formula>. These weights are defined as,<disp-formula id="equ17"><mml:math id="m17"><mml:mrow><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#8796;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mtable columnspacing="5pt" rowspacing="0pt"><mml:mtr><mml:mtd columnalign="left"><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mo>&#8727;</mml:mo></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mo>&#8727;</mml:mo></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#8901;</mml:mo><mml:mrow><mml:mo maxsize="210%" minsize="210%">(</mml:mo><mml:mfrac><mml:mrow><mml:mstyle displaystyle="false"><mml:msubsup><mml:mo largeop="true" symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>k</mml:mi></mml:msubsup></mml:mstyle><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:msub><mml:mi class="ltx_font_mathcaligraphic">&#119979;</mml:mi><mml:mi 
class="ltx_font_mathcaligraphic">&#119998;</mml:mi></mml:msub></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mi>k</mml:mi></mml:mfrac><mml:mo maxsize="210%" minsize="210%">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mtd><mml:mtd columnalign="left"><mml:mrow><mml:mo>,</mml:mo><mml:mrow> <mml:mtext>if </mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow> <mml:mtext>is in the 2-core of </mml:mtext><mml:mi>G</mml:mi></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd columnalign="left"><mml:msub><mml:mi>w</mml:mi><mml:mo>&#8727;</mml:mo></mml:msub></mml:mtd><mml:mtd columnalign="left"><mml:mrow><mml:mo>,</mml:mo> <mml:mtext>otherwise</mml:mtext></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:math></disp-formula>where we have the minimum ECG weight <inline-formula><mml:math id="inf137"><mml:mrow><mml:mn>0</mml:mn><mml:mo>&lt;</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mo>&#8727;</mml:mo></mml:msub><mml:mo>&lt;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:math></inline-formula> and the co-occurence of edges <inline-formula><mml:math id="inf138"><mml:mi>u</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf139"><mml:mi>v</mml:mi></mml:math></inline-formula> as <inline-formula><mml:math id="inf140"><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:msub><mml:mi>P</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:msubsup><mml:mo largeop="true" 
symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:msub><mml:mi>l</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:msubsup><mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mn>&#120793;</mml:mn><mml:msubsup><mml:mi>C</mml:mi><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msubsup></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#8901;</mml:mo><mml:msub><mml:mn>&#120793;</mml:mn><mml:msubsup><mml:mi>C</mml:mi><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msubsup></mml:msub></mml:mrow><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula> where <inline-formula><mml:math id="inf141"><mml:mrow><mml:msub><mml:mn>&#120793;</mml:mn><mml:msubsup><mml:mi>C</mml:mi><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:msubsup></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is an indicator function of if the edge <inline-formula><mml:math id="inf142"><mml:mi>u</mml:mi></mml:math></inline-formula> occurs in the cluster of <inline-formula><mml:math id="inf143"><mml:msub><mml:mi>P</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> or not.</p><p>With this function, ECG combines these level-1 Louvain partitions as a single weighted graph which serves as the result.</p><sec id="s9-2-1"><title>SHapley Additive exPlanations (SHAP)</title><p>SHAP values build off of Shapley values (<xref ref-type="bibr" rid="bib150">Shapley, 1988</xref>) and provides interpretability to machine learning models by computing the contributions of each feature towards the overall model. 
These explanations of machine learning models are models in and of themselves and are referred to as &#8216;additive feature attribution methods&#8217;. These explanation models use simplified inputs <inline-formula><mml:math id="inf144"><mml:msup><mml:mi>x</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup></mml:math></inline-formula> which are mapped to original inputs through a function <inline-formula><mml:math id="inf145"><mml:mrow><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mi>x</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula> and try to ensure that <inline-formula><mml:math id="inf146"><mml:mrow><mml:mrow><mml:mi>g</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mi>z</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#8776;</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mi>z</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula> whenever <inline-formula><mml:math id="inf147"><mml:mrow><mml:msup><mml:mi>z</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo>&#8776;</mml:mo><mml:msup><mml:mi>x</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup></mml:mrow></mml:math></inline-formula> where <inline-formula><mml:math id="inf148"><mml:mi>f</mml:mi></mml:math></inline-formula> is the machine learning model and <inline-formula><mml:math id="inf149"><mml:mi>g</mml:mi></mml:math></inline-formula> is the 
explanation model. This yields the additive form, which is a linear combination of binary variables:<disp-formula id="equ18"><mml:math id="m18"><mml:mrow><mml:mrow><mml:mi>g</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mi>z</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>M</mml:mi></mml:munderover></mml:mstyle><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:msubsup><mml:mi>z</mml:mi><mml:mi>i</mml:mi><mml:mo>&#8242;</mml:mo></mml:msubsup></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where <inline-formula><mml:math id="inf150"><mml:mrow><mml:msup><mml:mi>z</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo>&#8712;</mml:mo><mml:msup><mml:mrow><mml:mo stretchy="false">{</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">}</mml:mo></mml:mrow><mml:mi>M</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> is the binary value specifying the inclusion or exclusion of a number of simplified feature inputs <inline-formula><mml:math id="inf151"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:mstyle></mml:math></inline-formula>, and <italic>&#981;</italic><sub><italic>i</italic></sub> is the effect of each feature.</p><p>Work in <xref ref-type="bibr" rid="bib98">Lundberg and Lee, 2017</xref> devises such a model satisfying three important properties within this framework:</p><sec id="s9-2-1-1"><title>Local accuracy/efficiency</title><p>the explanation&#8217;s features with their effects <inline-formula><mml:math
id="inf152"><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> must sum for each feature <inline-formula><mml:math id="inf153"><mml:mi>i</mml:mi></mml:math></inline-formula> to the output <inline-formula><mml:math id="inf154"><mml:mrow><mml:mi>f</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula>.<disp-formula id="equ19"><mml:math id="m19"><mml:mrow><mml:mrow><mml:mi>f</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munderover><mml:mo largeop="true" movablelimits="false" symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>M</mml:mi></mml:munderover></mml:mstyle><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where <inline-formula><mml:math id="inf155"><mml:mrow><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi>&#120124;</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="120%" 
minsize="120%">[</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>z</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo maxsize="120%" minsize="120%">]</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi mathvariant="normal">&#8709;</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula></p></sec><sec id="s9-2-1-2"><title>Consistency/Monotonicity</title><p>If a model changes so that the effect of a feature increases or stays the same regardless of other inputs, that input&#8217;s attribution should not decrease. For any two models <inline-formula><mml:math id="inf156"><mml:mi>f</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf157"><mml:msup><mml:mi>f</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup></mml:math></inline-formula> if,<disp-formula id="equ20"><mml:math id="m20"><mml:mrow><mml:mrow><mml:mrow><mml:msubsup><mml:mi>f</mml:mi><mml:mi>x</mml:mi><mml:mo>&#8242;</mml:mo></mml:msubsup><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>S</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:msubsup><mml:mi>f</mml:mi><mml:mi>x</mml:mi><mml:mo>&#8242;</mml:mo></mml:msubsup><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>S</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi mathvariant="normal">\</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>i</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>&#8805;</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>S</mml:mi><mml:mo
stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>S</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi mathvariant="normal">\</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>i</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where <inline-formula><mml:math id="inf158"><mml:mrow><mml:mi>S</mml:mi><mml:mo>&#8712;</mml:mo><mml:mi class="ltx_font_mathcaligraphic">&#8497;</mml:mi></mml:mrow></mml:math></inline-formula> are subsets of all features and <inline-formula><mml:math id="inf159"><mml:mrow><mml:mi>S</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi mathvariant="normal">\</mml:mi><mml:mo>&#8290;</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:math></inline-formula> is the setting of feature <inline-formula><mml:math id="inf160"><mml:mi>i</mml:mi></mml:math></inline-formula> to zero (or some background reference value intended to be negligible) then <inline-formula><mml:math id="inf161"><mml:mrow><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mi>f</mml:mi><mml:mo>&#8242;</mml:mo></mml:msup><mml:mo>,</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#8805;</mml:mo><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></inline-formula>.</p></sec><sec id="s9-2-1-3"><title>Missingness</title><p>This is the idea that features with no effect on <italic>f</italic><sub><italic>x</italic></sub> should have no assigned impact <inline-formula><mml:math 
id="inf162"><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula>. This is expressed as,<disp-formula id="equ21"><mml:math id="m21"><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>S</mml:mi><mml:mo>&#8746;</mml:mo><mml:mi>i</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>S</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>for all subsets of features <inline-formula><mml:math id="inf163"><mml:mrow><mml:mi>S</mml:mi><mml:mo>&#8712;</mml:mo><mml:mi class="ltx_font_mathcaligraphic">&#8497;</mml:mi></mml:mrow></mml:math></inline-formula>, then <inline-formula><mml:math id="inf164"><mml:mrow><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow></mml:math></inline-formula>.</p><p>The authors prove that the only possible additive feature attribution method that satisfies these three criteria is SHAP whose values are computed as the following,<disp-formula id="equ22"><mml:math id="m22"><mml:mrow><mml:mrow><mml:msub><mml:mi>&#981;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munder><mml:mo largeop="true" movablelimits="false" 
symmetric="true">&#8721;</mml:mo><mml:mrow><mml:mi>R</mml:mi><mml:mo>&#8712;</mml:mo><mml:mi>&#8477;</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mrow><mml:mstyle displaystyle="true"><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>M</mml:mi><mml:mo>!</mml:mo></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo maxsize="210%" minsize="210%">[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mi>P</mml:mi><mml:mi>i</mml:mi><mml:mi>R</mml:mi></mml:msubsup><mml:mo>&#8746;</mml:mo><mml:mi>i</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>x</mml:mi></mml:msub><mml:mo>&#8290;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>P</mml:mi><mml:mi>i</mml:mi><mml:mi>R</mml:mi></mml:msubsup><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo maxsize="210%" minsize="210%">]</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>where <inline-formula><mml:math id="inf165"><mml:mi>R</mml:mi></mml:math></inline-formula> is the set of all feature orderings and <inline-formula><mml:math id="inf166"><mml:msubsup><mml:mi>P</mml:mi><mml:mi>i</mml:mi><mml:mi>R</mml:mi></mml:msubsup></mml:math></inline-formula> is the set of all features that come before the <inline-formula><mml:math id="inf167"><mml:msup><mml:mi>i</mml:mi><mml:mtext>th</mml:mtext></mml:msup></mml:math></inline-formula> one in ordering <inline-formula><mml:math id="inf168"><mml:mi>R</mml:mi></mml:math></inline-formula> and <inline-formula><mml:math id="inf169"><mml:mi>M</mml:mi></mml:math></inline-formula> is the number of input features.</p><p>Extending SHAP values to tree classifiers, the authors create shap.TreeExplainer (<xref ref-type="bibr" rid="bib97">Lundberg et al., 2020</xref>) to calculate SHAP 
values by using path-dependent feature perturbation to yield the plots in <xref ref-type="fig" rid="fig5">Figure 5B and C</xref>.</p></sec></sec><sec id="s9-2-2"><title><italic>WaveMAP</italic> is stable over parameter choice with respect to random seed and data bootstrap</title><sec id="s9-2-2-1"><title>Parameter Choice</title><p>Louvain clustering (<xref ref-type="bibr" rid="bib22">Blondel et al., 2008</xref>) requires the specification of a resolution parameter, <inline-formula><mml:math id="inf170"><mml:mi>t</mml:mi></mml:math></inline-formula>, which controls the &#8216;characteristic scale&#8217; by which network communities are identified; the larger this parameter, the fewer the number of clusters (communities) detected and vice versa (<xref ref-type="bibr" rid="bib89">Lambiotte, 2007</xref>).</p><p>We selected a resolution parameter based on two factors. The most important factor was modularity score. Modularity (the &#8216;connectedness&#8217; of a community, see Materials and methods) is a community-wise measure defined as the difference between the weights of the edges within a cluster and the edges incoming from any other node outside of the cluster. Maximizing this value over the whole graph finds communities with high amounts of intra-connectivity and low out-connectivity. The second factor we considered was the sizes of the resulting clusters after choosing a resolution parameter. We did not want clusters with too few members (n &lt; 20 which would be statistically difficult to interpret). The regions with the highest modularity score were at a resolution parameter of 1 and decreased from there onwards. However, choosing a resolution parameter of 1 led to a large number of clusters which often had very few members (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>, leftmost column) making downstream statistical comparisons underpowered. 
We therefore chose <inline-formula><mml:math id="inf171"><mml:mi>t</mml:mi></mml:math></inline-formula> to be 1.5 which resulted in the next best average modularity score of <inline-formula><mml:math id="inf172"><mml:mrow><mml:mn>0.761</mml:mn><mml:mo>&#177;</mml:mo><mml:mn>0.004</mml:mn></mml:mrow></mml:math></inline-formula> (mean &#177; S.D.) and an average of <inline-formula><mml:math id="inf173"><mml:mrow><mml:mn>8.29</mml:mn><mml:mo>&#177;</mml:mo><mml:mn>0.84</mml:mn></mml:mrow></mml:math></inline-formula> (mean &#177; S.D.) clusters across 25 random data permutations (<xref ref-type="fig" rid="fig3">Figure 3B</xref>). In this manner, we found a set of waveform clusters that balanced the diversity found by UMAP and statistical interpretability.</p></sec><sec id="s9-2-2-2"><title>Random seed</title><p>To show this hierarchy of clustering resolutions along the curve in <xref ref-type="fig" rid="fig3">Figure 3C</xref> and to demonstrate <italic>WaveMAP</italic>&#8217;s robustness to random seed initialization, we plotted three different plots for several different resolution parameters in <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1A</xref>. Each random seed produced the same clustering with only slight perturbations of scale and rotation. To validate these clusters were reliable and not an artifact of our particular data sample, we counted the number of clusters from 100 randomly permuted subsets of the full dataset at varying proportions (from 10% to 100% in 10% increments) and also set each with a different UMAP random seed (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, in red). As the data portion increased, we found that the number of clusters increased then tapered off to around eight clusters at &#8764;60% of the full dataset. 
In the same manner, we also calculated the adjusted mutual information score (AMI) (<xref ref-type="bibr" rid="bib168">Timme and Lapish, 2018</xref>) between a subset of the data and the full dataset (<xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, in green). This is a measure of how closely two sets of clusterings agree with each other, intuitively interpreted as how &#8216;informative&#8217; knowing one potential clustering is for predicting another. Just as with the number of clusters, the AMI increased steeply until around 40% of the dataset, reaching a score of 0.8, and then slowly increased. This analysis is reassuring and demonstrates that we have an adequate number of waveforms to yield consistent clustering with which to describe the diversity of cell types in monkey PMd.</p></sec><sec id="s9-2-2-3"><title>Stability</title><p>Louvain clustering is sometimes unstable. That is, results from successive runs on the same data can show considerable variation on some datasets (<xref ref-type="bibr" rid="bib133">Poulin and Th&#233;berge, 2019</xref>). To test whether these eight clusters consistently contained the same constituent data points run-to-run, we used ensemble clustering for graphs (ECG) (<xref ref-type="bibr" rid="bib132">Poulin and Th&#233;berge, 2018</xref>; <xref ref-type="bibr" rid="bib133">Poulin and Th&#233;berge, 2019</xref>). ECG generates <inline-formula><mml:math id="inf174"><mml:mi>k</mml:mi></mml:math></inline-formula> randomized level-1 (one round of Louvain clustering, <xref ref-type="fig" rid="fig2s1">Figure 2&#8212;figure supplement 1B</xref>) partitions and combines their graph structure via the co-occurrence of edges between nodes across partitionings. Hence the &#8216;ensemble&#8217; in the name (also called &#8216;consensus clustering&#8217;).
Performing ECG with <inline-formula><mml:math id="inf175"><mml:mi>k</mml:mi></mml:math></inline-formula> = 10, 100 times on UMAP graphs with different random seeds produced an average of <inline-formula><mml:math id="inf176"><mml:mrow><mml:mn>8.87</mml:mn><mml:mo>&#177;</mml:mo><mml:mn>0.74</mml:mn></mml:mrow></mml:math></inline-formula> (mean &#177; S.D.) clusters which was similar to that found by Louvain clustering with resolution parameter set to 1.5. In addition, the runs of ECG that yielded eight clusters had an almost exact structure to that produced by <italic>WaveMAP</italic> (compare <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1C</xref> to <xref ref-type="fig" rid="fig3">Figure 3A</xref>). The runs of ECG with more than eight clusters contained small clusters (n &#8804; 20) splitting off, which were too small to allow us to make rigorous conclusions statistically. We therefore chose the more conservative eight cluster solution that balanced maximizing cluster number while ensuring adequate cluster sizes.</p></sec></sec></sec></sec><sec id="s10" sec-type="appendix"><title>WaveMAP&#8217;s robustness comes from clustering in high-dimension rather than the projected space</title><p>Another common approach to clustering high-dimensional data has been to cluster in the embedded space, that is, clustering in the lower-dimensional projected space of a dimensionality reduction method. While this approach is successful for similar applications such as spike sorting (<xref ref-type="bibr" rid="bib43">Dimitriadis et al., 2018</xref>; <xref ref-type="bibr" rid="bib100">Mahallati et al., 2019</xref>; <xref ref-type="bibr" rid="bib64">Harris et al., 2000</xref>; <xref ref-type="bibr" rid="bib24">Chah et al., 2011</xref>), we found it to be more unstable when used for our use case of classifying waveform types (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1</xref>). 
Using the robustness analysis in <xref ref-type="fig" rid="fig3s1">Figure 3&#8212;figure supplement 1B</xref>, we compared <italic>WaveMAP</italic> against both DBSCAN (<xref ref-type="bibr" rid="bib52">Ester et al., 1996</xref>) on the embedded 2-dimensional t-SNE (<xref ref-type="bibr" rid="bib99">Maaten and Hinton, 2008</xref>) space (DBSCAN on t-SNE) and a GMM on the projected space formed by the first three principal components of the waveform data (94% variance explained). As before, we first applied each method to the full dataset to construct a &#8216;reference&#8217; to compare against. Thus each method is compared only to its own reference as to facilitate a fair head-to-head comparison.</p><p>DBSCAN on t-SNE and the GMM on PCA formed what seemed to be reasonable clusterings of the data with DBSCAN on t-SNE forming clusters similar to <italic>WaveMAP</italic> (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1A</xref>, top) and GMM on PCA forming four clusters just as in <xref ref-type="fig" rid="fig4">Figure 4B</xref> albeit in PCA- and not the feature-space (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1A</xref>, bottom). When analyzing the AMI of clusterings across data bootstraps, <italic>WaveMAP</italic>&#8217;s AMI was moderate (&#8764;0.5) even at low percentages of the full dataset and quickly increased to high values (&gt; 0.8) from about 50% of the dataset onwards (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1B</xref>, in blue). However, when we conducted our robustness analysis on the other methods, both of them proved to be less stable (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1B</xref>, green and red). With DBSCAN on t-SNE, although it was able to reach high AMI values when the data fraction increased past 75%, it was low to moderate before this point. 
In this regime of high data fraction, many of the points between random samples were shared and so much of this stability may be due to similarity in the data (when the fraction of the full dataset is high, most of the data is the same between random samples). Ideally, a method should be able to pick up on structure predictive of out-of-sample data at low data fractions, i.e., results should generalize. With the GMM on PCA approach, AMI started out about as high as <italic>WaveMAP</italic> but failed to increase as more data was included. Examining the individual samples, we see that at 40% of the full dataset (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1C</xref>, left), a continuum of AMI values are occupied from low to high. At 90% of the full dataset (<xref ref-type="fig" rid="fig4s1">Figure 4&#8212;figure supplement 1C</xref>, right), the AMI scores cluster around different locations and overall occupy the same range as at 40%. This suggests that the GMM on PCA, having nearly the full dataset, is converging to one of several solutions (local minima) with some of them being highly suboptimal.</p></sec></boxed-text></app></app-group></back><sub-article article-type="decision-letter" id="sa1"><front-stub><article-id pub-id-type="doi">10.7554/eLife.67490.sa1</article-id><title-group><article-title>Decision letter</article-title></title-group><contrib-group><contrib contrib-type="editor"><name><surname>Salinas</surname><given-names>Emilio</given-names></name><role>Reviewing Editor</role><aff><institution>Wake Forest School of Medicine</institution><country>United States</country></aff></contrib></contrib-group></front-stub><body><boxed-text><p>In the interests of transparency, eLife publishes the most substantive revision requests and the accompanying author responses.</p></boxed-text><p><bold>Acceptance summary:</bold></p><p>This article will be of interest to neurophysiologists interested in identifying different neuronal types from 
extracellular recordings, which is a difficult computational task. It describes novel data-driven methods for functionally dissociating circuits in the primate brain: using a combination of unsupervised dimensionality reduction techniques and clustering methods to categorize extracellular spike waveform profiles (the signatures of different neuronal types), the authors identified cell types with distinct functional and neurophysiological properties in a dataset collected from the premotor cortex of two macaques. The authors went to great lengths to validate their results with clear and informative visualizations, and to outline the capabilities and limitations of this promising cell identification technique.</p><p><bold>Decision letter after peer review:</bold></p><p>Thank you for submitting your article "Non-linear Dimensionality Reduction on Extracellular Waveforms Reveals Cell Type Diversity in Premotor Cortex" for consideration by <italic>eLife</italic>. Your article has been reviewed by 2 peer reviewers, and the evaluation has been overseen by a Reviewing Editor and Michael Frank as the Senior Editor. The reviewers have opted to remain anonymous.</p><p>The reviewers have discussed their reviews with one another, and the Reviewing Editor has drafted this to help you prepare a revised submission.</p><p>Essential Revisions:</p><p>1) One potential weakness is the technical presentation of the new technique in the Supplementary information, which although complete, does not allow the targeted audience to create an intuition of it, or be able to replicate the analysis. Describing the Uniform Manifold Approximation and Projection (UMAP) idea to an audience not versed in topology and category theory is not easy. Yet the manuscript would increase its already significant impact if a small effort was put to describe UMAP both technically (as it is nicely done) and more intuitively (maybe with the help of a schematic or two). 
Including specific computer code would also make these methods more accessible.</p><p>2) The reviewers noted a few additional analyses that would provide further validation of the methods and a better sense of how they would generalize to other datasets. Please address the individual commentaries listed in their additional recommendations.</p><p>3) Reviewer 1 also raised a few questions that would benefit from clarification and refinement (in additional recommendations). Although not critical, addressing them would benefit the presentation and should be straightforward.</p><p><italic>Reviewer #1:</italic></p><p>1. When discussing hyperparameter optimization and gridsearch, is the entire dataset used in deriving the best values? The authors acknowledge the data-leakage in conducting cross-validation, which seems sound given the questions pursued in this paper; however, the hyperparameters themselves would also be a potential source of leakage and should be addressed/discussed (primarily for UMAP and Louvain, random forests with lesser importance).</p><p>2. The sentence "This algorithm is also fairly deterministic (after fixing a random seed) &#8230;" was confusing with the context of fixing a random seed. If it is deterministic, then one wouldn't need to fix the random seed, correct?</p><p>3. When conducting classification using random forests, do the authors utilize the normalized waveforms (i.e., same input going into WaveMAP of 1.4ms at 30 kHz; 42 samples or features)? An explicit mention of vector length would be helpful to the reader.</p><p>4. Are there any special considerations when applying UMAP to time-series data? The canonical examples of UMAP do not contain the autoregressive aspects of spiking waveforms and it might be worthwhile to mention the implications, if any, on the effectiveness of the method.</p><p>5. It is not clear how many models are being trained/tested for the generation of confusion matrices (e.g. Figure 2C). 
Are the binary classifiers trained to distinguish between data points from 1 vs other 7 clusters or are there different models for classification between each possible pair of clusters?</p><p>6. The authors indicate that normalization was conducted as a preprocessing step. Are there any concerns on whether normalization might be removing a potential feature that may factor into further classification between spikes? Is it a possibility that adding amplitudes would aid the clustering procedure (e.g., by including the scaling parameter for each spike as an additional feature at the end of the feature vector) or is there a methodological constraint that renders non-normalized amplitudes useless?</p><p>7. The authors addressed the issue of stability of WaveMAP, as it pertains to applying different seeds on the entire data as well as the robustness of the 8-cluster solution on varying sizes of data. My understanding is that the authors did not confirm whether those 8-cluster solutions were indeed similar (i.e. spikes clustered with neighboring spikes from the 100% dataset model) when varying data size. A third test is needed that takes a subset of the dataset (e.g., 80%; a subset that would still on average produce an 8-cluster solution) and tests whether the spikes would consistently cluster with their neighbors in the 100% data model. While this test is posthoc and the full data are still incorporated in the remainder of analysis in the paper, this might help to demonstrate the stability of the solution.</p><p><italic>Reviewer #2:</italic></p><p>1. The results rest a lot on appropriate spike sorting. Given that the probe used will not allow any of the 'triangulation' spike-sorting based methods to work (too spaced out electrodes), it would be useful for the authors to provide a bit more information (maybe in their Methods section) as to how spike-sorting was achieved. 
Spike selection description I find adequate, but describing the spike-sorting method as "inspection of extracellular waveforms and subsequent offline spike sorting (Plexon Offline Sorter)" doesn't really allow the reader to form a picture of the likelihood of having 'single units' that are not really single. Particularly, nothing is mentioned about thresholding of spikes. I am not suggesting a thorough description of the spike sorting method but some information on the techniques used (within the Offline sorter and/or anything else) and a report on the authors' confidence of the single units produced (and how such confidence was evaluated). I understand that the literature utilising the types of probes used by the authors is also most of the times vague as to what spike-sorting really means, yet in most cases the assignment of spikes to single units does not bare the same significance to the subsequent results as in the case of this work, hence the above argument for some more clarity on the matter.</p><p>2. A purported advantage of UMAP is that, in principle, it can deal with a very large number of features (better than t-SNE, which struggles with &gt; 50 features or so). However, the technique is still new and this belief is largely anecdotal. It would be beneficial, to both the neuroscientific community and in general to anyone considering using UMAP, if the authors could make a comparison between using the raw data set in UMAP and using a dimensionality reduced one through PCA giving only the top PCs to UMAP. Practically provide some evidence to support your choice of using the raw data set. Also providing an idea of the "complexity" of the data set would be useful (maybe by mentioning the variance explained by different number of principal components after a PCA, even if you do not end up using the primary components generated).</p><p>3. 
The choice of the resolution parameter for the clustering algorithm (line 873 and Figure 2B) has rather important consequences in the subsequent analysis. The choice should be better justified.</p><p>a. The authors say they chose the resolution parameter at the elbow of the number of clusters, but that is not the case. The elbow point is the immediate next one (resolution of 2 and number of clusters probably 7).</p><p>b. The "number of clusters" value that was the most stable over the resolution is 5 (given resolutions between 3 and 5 if I read the graph properly), which might be another logical assumption for picking the used resolution.</p><p>c. So although the argument that choosing a smaller resolution leads to too many clusters with too few samples in them is a valid one, nonetheless there seems to be no real argument against choosing a resolution that would lead to a smaller number of clusters. Since this clustering impacts the whole of the subsequent analysis, it would be important to understand the authors' reasoning.</p><p>d. It would be interesting for example to see how a smaller number of WaveMAP generated clusters would fare in the learnability test shown by the authors (starting at line 188).</p><p>4. Comparison to t-SNE. This comment has been prompted by the phrase "While providing visualization, these methods are difficult to cluster upon because they return a different mapping on every initialization." (line 546). The authors provide reference 155 that actually directly contradicts this statement. Although the other comments about t-SNE and UMAP are valid (reverse mapping, and use of labels in training), I believe the first comment requires either removal or better further exploration. It would be very informative if the authors did a t-SNE embedding (on the data after PCA) and then used a DBSCAN to classify the cells. 
This would be rather straight forward to do at their data set size, even multiple times for different initialisations (but see Dimitriadis et al., 2018a for a GPU accelerated t-SNE algorithm and also for t-SNE and DBSCAN used in spike-sorting). That result can then also be contrasted to the WaveMAP and GMM methods using their random forest classifier. Even if the results from the t-SNE / DBSCAN clustering are comparative to WaveMAP the reverse mapping capability of UMAP is definitely a strength that is very nicely utilised in this work and which any other non-linear dimensionality reduction technique doesn't have. As a final point, in the comparison section the authors could also add that t-SNE is way slower than UMAP and this speed difference starts to be very obvious (minutes vs days) in sample sizes in the hundreds of thousands to millions (i.e. the regime of spike collection that high density probes collect Dimitriadis et al. 2018b and Steinmetz et al., 2018).</p><p>5. Number of waveform features used to classify the cells. The authors compare their WaveMAP to a seemingly standard technique in the macaque literature (or maybe in the literature that uses sparse electrode probes), that uses 3 features of the waveform to classify the cells. This is not the way things are done in most of the rodent electrophysiology (or works that use denser probes or tetrodes). The standard technique is to PCA each spike form and use the first 3 components per electrode. In the case of this work that would lead to the use of the same number of features, but (probably) much larger retention of information. See Harris et al., 2000 (and references therein) for how the PCA of the waveforms is used in tetrode recordings to do spike-sorting. More recent spike-sorting algorithms use template matching of the waveform. For instance, Kilosort (Pachitariu et al., 2016) omits spike detection and PCA, which can miss useful information. 
Instead, it relies on identifying template waveforms and their timing properties, in order to assign spikes (in the entirety of their waveform) to different cells. Also comparing the WaveMAP process to the clustering results (following the used Generalised Gaussian Models method, or other peak-density detection methods) of this feature set would be both more informative and more valid for the larger electrophysiology community.</p><p>1. Harris, K. D., Henze, D. A., Csicsvari, J., Hirase, H., and Buzsaki, G. (2000). Accuracy of tetrode spike separation as determined by simultaneous intracellular and extracellular measurements. Journal of neurophysiology, 84(1), 401-414.</p><p>2. Pachitariu, M., Steinmetz, N., Kadir, S., and Carandini, M. (2016). Kilosort: realtime spike-sorting for extracellular electrophysiology with hundreds of channels. BioRxiv, 061481.</p><p>3. Dimitriadis, G., Neto, J. P., and Kampff, A. R. (2018). T-SNE Visualization of Large-Scale Neural Recordings. Neural Computation, 30(7), 1750-1774. https://doi.org/<ext-link ext-link-type="uri" xlink:href="http://library.stanford.edu/sfx?__char_set=utf8&amp;id=doi:10.1162/neco_a_01097&amp;sid=libx&amp;genre=article">10.1162/neco_a_01097</ext-link></p><p>4. Dimitriadis, G., Neto, J. P., Aarts, A., Alexandru, A., Ballini, M., Battaglia, F., Calcaterra, L., David, F., Fi&#225;th, R., Fraz&#227;o, J., Geerts, J. P., Gentet, L. J., Helleputte, N. V., Holzhammer, T., Hoof, C. van, Horv&#225;th, D., Lopes, G., Lopez, C. M., Maris, E., &#8230; Kampff, A. R. (2018). Why not record from every channel with a CMOS scanning probe? BioRxiv, 275818. https://doi.org/<ext-link ext-link-type="uri" xlink:href="http://library.stanford.edu/sfx?__char_set=utf8&amp;id=doi:10.1101/275818&amp;sid=libx&amp;genre=article">10.1101/275818</ext-link></p><p>5. Steinmetz, N. A., Zatka-Haas, P., Carandini, M., and Harris, K. D. (2018). Distributed correlates of visually-guided behavior across the mouse brain. BioRxiv, 474437. 
https://doi.org/<ext-link ext-link-type="uri" xlink:href="http://library.stanford.edu/sfx?__char_set=utf8&amp;id=doi:10.1101/474437&amp;sid=libx&amp;genre=article">10.1101/474437</ext-link></p></body></sub-article><sub-article article-type="reply" id="sa2"><front-stub><article-id pub-id-type="doi">10.7554/eLife.67490.sa2</article-id><title-group><article-title>Author response</article-title></title-group></front-stub><body><disp-quote content-type="editor-comment"><p>Essential Revisions:</p><p>1) One potential weakness is the technical presentation of the new technique in the Supplementary information, which although complete, does not allow the targeted audience to create an intuition of it, or be able to replicate the analysis. Describing the Uniform Manifold Approximation and Projection (UMAP) idea to an audience not versed in topology and category theory is not easy. Yet the manuscript would increase its already significant impact if a small effort was put to describe UMAP both technically (as it is nicely done) and more intuitively (maybe with the help of a schematic or two). Including specific computer code would also make these methods more accessible.</p></disp-quote><p>We thank the editor and reviewers for this comment on how to improve the impact of our work. Our intent with Figure 2-Supplement 1 in the manuscript was to provide this sort of intuition, and we completely agree that this intuition can be missed for the reader who does not focus on the Supplementary Information. We also recognize that spending a large amount of time reading supplementary information before understanding the conclusions of the paper can be unappealing for many readers.</p><p>To address this potential weakness we have adopted a three step strategy. First, we have now included an explicit figure (Figure 2 in the manuscript) to schematize WaveMAP and also incorporate some additional text on the principles underlying UMAP and Louvain clustering. 
We have also added a full methods section (&#8220;A step-by-step guide to UMAP and Louvain clustering in WaveMAP") describing the intuition for WaveMAP in detail and expanding on the steps shown in the schematic. Finally, for the reader interested in the mathematical nuances of the UMAP and Louvain clustering approach, we have included the detailed mathematical descriptions in the Supplementary Information (&#8220;UMAP dimensionality reduction"). We are confident that these three pieces now included in the manuscript should allay the potential weakness in our exposition. The following text is now provided in the results:</p><p>&#8220;In WaveMAP (Figure 2), we use a three step strategy for the analysis of extracellular wave- Main Q1 forms: We first passed the normalized and trough-aligned waveforms (Figure 2A-i) into UMAP to obtain a high-dimensional graph (Figure 2A-ii) (McInnes et al., 2018). Second, we used this graph (Figure 2B-iii) and passed it into Louvain clustering (Figure 2B-iv, Blondel et al., 2008), to delineate high-dimensional clusters. Third, we used UMAP to project the high-dimensional graph into two dimensions (Figure 2B-v). We colored the data points in this projected space according to their Louvain cluster membership found in step two to arrive at our final WaveMAP clusters (Figure 2B-vi). We also analyzed the WaveMAP clusters using interpretable machine learning (Figure 2B-vii) and also an inverse transform of UMAP (Figure 2B-viii). A detailed explanation of the steps associated with WaveMAP is available in the methods, and further mathematical details of WaveMAP are available in the supplementary information.&#8221;</p><p>Code: In our original submission, we had included a Google Colab notebook for the readers and reviewers to try out our method. 
We also highlight this now in the methods section of the manuscript under the &#8220;Code and Data Availability Section" so that readers can follow along by executing code if they wish to replicate various figures. These will be provided with the paper if accepted for publication. We have added the following text to the code and data availability statement at the start of our Methods section:</p><p>&#8220;All figures and figure supplements can be generated from the code and data included</p><p>with the manuscript. Figure 1 is generated using MATLAB whereas all other figures were generated using the Jupyter/Google Colab notebook available with this manuscript. Please see the Readme.md file included in the zip file for further instructions. Note, we have not included the raw firing rates across conditions for the neurons because of the large size of the data. This data can be made available by emailing the authors. Further information about WaveMAP and updated notebooks can also be obtained from the Chandrasekaran lab website at Boston University (http://www.chandlab.org).&#8221;</p><p>Since the publication of our bioRxiv paper, a few researchers have approached us to use WaveMAP. We have shared some of this computer code and they have been able to get it working for their use case. In particular, Paulk et al., (2021) used WaveMAP to understand the types of neurons observed in human dorsolateral prefrontal cortex. We are therefore confident that the included code will be helpful for other researchers who want to use WaveMAP in their workflows.</p><disp-quote content-type="editor-comment"><p>2) The reviewers noted a few additional analyses that would provide further validation of the methods and a better sense of how they would generalize to other datasets. Please address the individual commentaries listed in their additional recommendations.</p></disp-quote><p>We have now responded to each of the comments in detail. 
Please see our responses below.</p><disp-quote content-type="editor-comment"><p>3) Reviewer 1 also raised a few questions that would benefit from clarification and refinement (in additional recommendations). Although not critical, addressing them would benefit the presentation and should be straightforward.</p></disp-quote><p>We have now edited the manuscript and responded to these comments and made changes to the manuscript to improve clarity.</p><disp-quote content-type="editor-comment"><p>Reviewer #1:</p><p>1. When discussing hyperparameter optimization and gridsearch, is the entire dataset used in deriving the best values? The authors acknowledge the data-leakage in conducting cross-validation, which seems sound given the questions pursued in this paper; however, the hyperparameters themselves would also be a potential source of leakage and should be addressed/discussed (primarily for UMAP and Louvain, random forests with lesser importance).</p></disp-quote><p>We apologize for any confusion but are a bit unsure of if this comment is referring to either (1) potential data leakage through the tuned hyperparameters of the classifier, (2) tuning of parameters in WaveMAP to maximize classification accuracy, or (3) the application of WaveMAP to the entire dataset, rather than a subset, resulting in potential overfitting. Below, we address all three of these potential points.</p><p>To address (1), we used a test-train split with k-fold cross-validation instead of just a simple test-train split when training the classifier. In the test-train split with k-fold cross-validation, a test set is put aside for final model evaluation (not tuning) and the training data is split further into another training set and a validation set. Unfortunately, the terminology does not differentiate between the training data the k-folds algorithm is applied to and the training data subset produced by the k-folds. 
The training-validation split is determined by k-fold splitting (five folds used in the manuscript thus an 80:20 ratio five times) and is used to train the model (training set) and tune hyperparameters (validation set). The test set is never seen by the training and hyperparameter optimization procedure and is only used after model tuning as a final evaluation of performance. We have revised the manuscript to clarify our procedure. We also corrected in the manuscript erroneous mentions of using a random forest classifier as we actually implemented a gradient boosted decision tree.</p><p>&#8220;To validate that WaveMAP finds a &#8220;real" representation of the data, we examined if a</p><p>very different method could learn the same representation. We trained a gradient boosted decision tree classifier (with a softmax multi-class objective) on the exact same waveform data (vectors of 48 time points, 1.6 ms time length) passed to WaveMAP and used a test-train split with k-fold cross-validation applied to the training data. Hyperparameters were tuned with a 5-fold cross-validated grid search on the training data and final parameters shown in Table 2. After training, the classification was evaluated against the held-out test set (which was never seen in model training/tuning) and the accuracy, averaged over clusters, was 91%.&#8221;</p><p>In regards to (2), we did not iteratively optimize or tune the parameters or hyperparameters of UMAP and Louvain to maximize classification accuracy in the boosted decision tree classifier. Because we did not parameter tune across a test-train split, we believe data leakage (at least in the conventional, deleterious sense) does not apply. The only parameter we changed relative to defaults was a slightly more conservative setting of 20 for the n neighbors parameter for UMAP. 
This change does affect the number of clusters since it affects the graph construction but we demonstrate that WaveMAP performs better than the GMM on features regardless of the value chosen for the n neighbor parameter (see Figure 4-Supplement 2). Tuning parameters that change cluster number, whether n neighbors or resolution, had little effect on classifier performance. WaveMAP yielded mappings that were more generalizable than a GMM on features across every number of clusters and both parameters investigated in the manuscript. Furthermore, WaveMAP maintained high separability in its clusters even across a wide range of n neighbors parameter values while the separability of clusters for the GMM on features method fell quickly.</p><p>We have included the following text in the manuscript.</p><p>&#8220;In fact, across all cluster numbers (n components from 2 to 16), a classifier tuned for the GMM performed more poorly on the GMM labels than a WaveMAP projection with the same number of clusters (Figure 4-Supplement 2E, in red). Tuning WaveMAP parameters that induce different cluster numbers, whether n neighbors (in dark blue) or resolution (in light blue), had little effect on classifier performance (Figure 4-Supplement 2E, in blues). WaveMAP yielded mappings that were more generalizable than a GMM on features across every number of clusters and both parameters investigated.&#8221;</p><p>In regards to (3), it could be that applying a transformation to the entire dataset before splitting the data could lead to potential overfitting. This is an excellent point getting at the intrinsic difficulty of applying data-driven methods to unlabeled data (where ground truth is not available). By construction, the very transformations themselves are defined by the data. 
Note, that using the full dataset is not unique to WaveMAP and is also a problem with GMM approaches as well.</p><p>In an ideal scenario, we would follow the recommended procedure of Moscovich and Rosset (2019) by (1) splitting the dataset, (2) applying a transformation learned from the training set to the test set, and (3) evaluating the discrepancy with a loss function. However, this procedure only works if there is some well-defined loss function incorporating ground truth. In our case, this would be &#8220;ground truth clusters". To mitigate this intrinsic difficulty, we used sufficiently large sample counts (number of neurons) to close the theoretical gap between validation error (discrepancy between train and test sets) and generalization error (discrepancy between data we do and don't have, see Figure 1 of Moscovich and Rosset, 2019). We demonstrate that sample counts are in a stable regime of cluster number presumably close to the ground truth number of clusters (Figure 3-Supplement 1B of the manuscript).</p><p>&#8220;WaveMAP was consistent in both cluster number (Figure 3-Supplement 1B, red) and cluster membership (which waveforms were frequently &#8220;co-members" of the same cluster; Figure 3-Supplement 1B, green).&#8221;</p><p>Finally, to address the notion that we are just simply finding a better parcellation of waveforms rather than anything having to do with real cell types, we also applied WaveMAP in a setting where we did know the ground truth cell types (mouse somatosensory cortex juxtacellular waveform data from Yu et al., (2019)). 
Alignment to ground truth was assessed through a measure called &#8220;homogeneity score&#8221; (Rosenberg and Hirschberg, 2007) in which clusterings with only one ground truth type in each cluster are given a score of 1.0 and clusterings with equal mixtures of each ground truth class are given 0.0.</p><p>In this way, we assess how aligned either method is to ground truth cell types and find that WaveMAP matches better with ground truth cell types than a GMM on features (0.67 vs. 0.41 homogeneity score). Homogeneity score is used (instead of AMI as later in our analyses) because it does not penalize a clustering for over-splitting a ground truth cluster. Homogeneity only penalizes a clustering if different ground truth types are incorporated into one cluster. WaveMAP tends to align its clusterings with the borders of ground truth cell types (<xref ref-type="fig" rid="respfig1">Author response image 1</xref>) while GMM on features more often cuts through ground truth groups forming clusters with mixtures of types (<xref ref-type="fig" rid="respfig1">Author response image 1</xref>). In the future, we hope to follow up on this promising result from mice in monkeys using optogenetics and in-vivo recordings.</p><fig id="respfig1"><label>Author response image 1.</label><caption><title>WaveMAP clusters align closer to ground truth cell types than GMM on features.</title><p>(<bold>A</bold>) At left, WaveMAP was applied to juxtacellular waveforms of known cell type (Yu et al., 2019) with the same parameters as in the manuscript. Cell types were identified through optogenetic-tagging and histochemical verification. At right, the ground truth cell types are displayed in the UMAP projected space. Homogeneity score for WaveMAP was 0.67. 
(<bold>B</bold>) At left, a Gaussian mixture model is applied in the feature space that appears in Figure 1D of (Yu et al., 2019); VIP-positive neurons were excluded for relatively low counts (n = 8) and some excitatory cells with very large peak-to-trough ratio were capped at a value of 10. At right, the ground truth cell types are shown in the feature space. Homogeneity score for the GMM on features was 0.41.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-67490.xml.media/resp-fig1.jpg"/></fig><disp-quote content-type="editor-comment"><p>2. The sentence "This algorithm is also fairly deterministic (after fixing a random seed) &#8230;" was confusing with the context of fixing a random seed. If it is deterministic, then one wouldn't need to fix the random seed, correct?</p></disp-quote><p>We apologize for the confusing wording and have removed this sentence and revised our manuscript accordingly. The UMAP algorithm is non-deterministic by default but by setting a seed, it is made deterministic (this makes the algorithm implement gradient descent instead of stochastic gradient descent). More importantly, random seed only affects the particular layout of the points in the projected 2-dimensional space but is of no consequence to the clustering. Our procedure clusters on the network graph before this layout step, and is therefore not affected by the random seed. The fixing of the seed is only used to reproduce the same &#8220;visualization&#8221; from run to run.</p><p>&#8220;We find that cluster memberships found by WaveMAP are stable with respect to random seed when resolution parameter and n neighbors parameter are fixed. This stability of WaveMAP clusters with respect to random seed is because much of the variability in UMAP layout is the result of the projection process (Figure 2B-v.a). Louvain clustering operates before this step on the high-dimensional graph generated by UMAP which is far less sensitive to the random seed. 
Thus, the actual layout of the projected clusters might differ subtly according to random seed, but the cluster memberships largely do not (see Supplementary Information and columns of Figure 3-Supplement 1A). Here, we fix the random seed purely for visual reproducibility purposes in the figure. Thus, across different random seeds and constant resolution, the clusters found by WaveMAP did not change because the graph construction was consistent across random seed at least on our dataset (Figure 3-Supplement 1A).&#8221;</p><disp-quote content-type="editor-comment"><p>3. When conducting classification using random forests, do the authors utilize the normalized waveforms (i.e., same input going into WaveMAP of 1.4ms at 30 kHz; 42 samples or features)? An explicit mention of vector length would be helpful to the reader.</p></disp-quote><p>Thank you for this comment. We used the same normalized waveforms for the classification as for the WaveMAP analysis. We have revised our manuscript to make this more explicit. We note that the waveforms are actually 1.6 ms in length and not 1.4 ms. As noted above, we also corrected in the manuscript erroneous mentions of using a random forest classifier as we actually implemented a gradient boosted decision tree.</p><p>&#8220;We trained a gradient boosted decision tree classifier (with a softmax multi-class objective) on the exact same waveform data (vectors of 48 time points, 1.6 ms time length) passed to WaveMAP and used a test-train split with k-fold cross-validation applied to the training data.&#8221;</p><disp-quote content-type="editor-comment"><p>4. Are there any special considerations when applying UMAP to time-series data? 
The canonical examples of UMAP do not contain the autoregressive aspects of spiking waveforms and it might be worthwhile to mention the implications, if any, on the effectiveness of the method.</p></disp-quote><p>This is correct: there is certainly additional autocorrelative structure in the data that is</p><p>perhaps not efficiently found by UMAP which treats each time point as independent. However, we don't note any deleterious effects on the analysis and a few other studies have deployed these methods on time-series data successfully (Ali et al., 2019; Gouwens et al., 2020; Jia et al., 2019; Sedaghat-Nejad et al., 2021). We have updated the Discussion of the manuscript to mention this:</p><p>&#8220;We also note that while traditional uses of non-linear dimensionality reduction have been applied to data lacking autoregressive properties, such as transcriptomic expression (Becht et al., 2018), this doesn't seem to be an issue for WaveMAP. Even though our waveforms have temporal autocorrelation, our method still is able to pick out interesting structure. Other work has found similar success in analyzing time series data with non-linear dimensionality reduction (Ali et al., 2019; Gouwens et al., 2020; Jia et al., 2019; Sedaghat-Nejad et al.,2021).&#8221;</p><disp-quote content-type="editor-comment"><p>5. It is not clear how many models are being trained/tested for the generation of confusion matrices (e.g. Figure 2C). Are the binary classifiers trained to distinguish between data points from 1 vs other 7 clusters or are there different models for classification between each possible pair of clusters?</p></disp-quote><p>We realize we were unclear in our exposition and have revised our manuscript accordingly.</p><p>We mistakenly stated that our classifier was using a logistic objective function for binary classification. 
The gradient boosted decision tree used was actually a single multi-class classifier with the softmax objective function (multivariate logistic regression, xgboost). XGBoost.XGBClassifier conducts the classification simultaneously: the classifier assigns a probability to each class for each data point. It then chooses the class with greatest probability and outputs this as the predicted class label. We have updated the manuscript accordingly in the Results and Supplementary Figure sections. This comment also helped us realize we used a gradient boosted decision tree rather than a gradient boosted random forest algorithm.</p><p>&#8220;To validate that WaveMAP finds a &#8220;real" representation of the data, we examined if a</p><p>very different method could learn the same representation. We trained a gradient boosted decision tree classifier (with a softmax multi-class objective) on the exact same waveform data (vectors of 48 time points, 1.6 ms time length) passed to WaveMAP and used a test-train split with k-fold cross-validation applied to the training data. Hyperparameters were tuned with a 5-fold cross-validated grid search on the training data and final parameters shown in Table 2.&#8221;</p><disp-quote content-type="editor-comment"><p>6. The authors indicate that normalization was conducted as a preprocessing step. Are there any concerns on whether normalization might be removing a potential feature that may factor into further classification between spikes? Is it a possibility that adding amplitudes would aid the clustering procedure (e.g., by including the scaling parameter for each spike as an additional feature at the end of the feature vector) or is there a methodological constraint that renders non-normalized amplitudes useless?</p></disp-quote><p>This is an interesting suggestion. We applied WaveMAP to unnormalized waveforms but</p><p>found that it failed to focus on any structure beyond spike amplitude. 
We intuit that this is similar to the reasons for normalizing prior to principal component analysis: if unnormalized data is passed into this method, most of the variance explained is &#8220;eaten up&#8221; by data points with large variance. Similarly, we think that UMAP tries to explain more of the variability induced by large amplitude spikes at the expense of other features. We evaluated the influence of amplitude by applying UMAP to unnormalized average spike waveforms, and we found that it led to a loss of interpretable structure in the appearance of clustered points in the projected space. Intuitively, UMAP is focused on explaining the large amount of variation due to spike amplitude to the detriment of other features more related to cell type. This is readily apparent when coloring the points associated with each waveform in the projected space by the spike amplitude.</p><p>Exclusion of spike amplitude is perhaps not problematic because spike amplitude in extracellular settings does not seem to be an important differentiator of most cell types. Amplitude in extracellular settings varies according to three dimensions orthogonal to cell type differences: (1) it attenuates with distance from the recording electrode (Gold et al., 2006); (2) it varies in different parts of the neuron and is probably greatest at the soma or initial segment (Gold et al., 2006); and (3) it gradually decreases during sustained firing in a spike train (Quirk and Wilson, 1999). So although some cell types exhibit different amplitude spikes as a reliable differentiator (Betz cells have extremely large spike amplitudes), most of spike amplitude variation is unrelated to cell type differences. 
We have updated the appropriate section in the Discussion to make this point clearer.</p><p>This is now included in panels E and F of Figure 3-Supplement 2; the text below is now included in the discussion portion of the manuscript:</p><p>Waveform cluster shapes are unlikely to arise from electrode placement</p><p>It is a possibility that the diversity of waveforms we observe is just an artifact of electrode placement relative to the site of discharge. This supposes that waveform shape changes with respect to the distance between the neuron and the electrode. This is unlikely because both in vitro studies (Deligkaris et al., 2016) and computational simulations (Gold et al., 2006) show distance from the soma mostly induces changes in amplitude. There is a small widening in waveform width but this occurs at distances in which the amplitude has attenuated below even very low spike thresholds (Gold et al., 2006). We controlled for this cell type-irrelevant variation in amplitude by normalizing spikes during preprocessing. It should also be noted that without any normalization, all structure was lost in the UMAP projection which instead yielded one large point cloud (Figure 3-Supplement 2E). Intuitively, this can be understood as UMAP allocating most of the projected space to explaining amplitude differences rather than shape variation. This can be visualized by coloring each point by the log of the amplitude of each spike (log of difference in maximum vs. minimum values) and observing that it forms a smooth gradient in the projected space (Figure 3-Supplement 2F).</p><disp-quote content-type="editor-comment"><p>7. The authors addressed the issue of stability of WaveMAP, as it pertains to applying different seeds on the entire data as well as the robustness of the 8-cluster solution on varying sizes of data. My understanding is that the authors did not confirm whether those 8-cluster solutions were indeed similar (i.e. 
spikes clustered with neighboring spikes from the 100% dataset model) when varying data size. A third test is needed that takes a subset of the dataset (e.g., 80%; a subset that would still on average produce an 8-cluster solution) and tests whether the spikes would consistently cluster with their neighbors in the 100% data model. While this test is posthoc and the full data are still incorporated in the remainder of analysis in the paper, this might help to demonstrate the stability of the solution.</p></disp-quote><p>This is an excellent point and something not fully addressed in our original manuscript. We have updated Figure 3-Supplement 1B in the manuscript. We calculated the adjusted mutual information score (AMI) between the WaveMAP clusters on randomly sampled subsets vs. WaveMAP on the full dataset and across 100 random samplings. AMI is a measure of how correlated two variables are, adjusted for correlation due to random chance and is a measure of how much information we receive about one variable given observation of another and vice versa (see note at end of response for a detailed description of this metric). Since subsets don't contain all the data points in the dataset, we drop the points from the full dataset missing from the subset when we make our AMI calculation (with the appropriate normalizations given clusterings are of different number, by construction). We find that across a large range of data subsets the clusterings largely agree with the solution using the full dataset. This is shown through the relatively high AMI scores once half of the dataset is included in the analysis. 
In addition, AMI scores tend to level off at approximately the same time as does the number of Louvain communities (clusters) which suggests that WaveMAP is consistently clustering the same units into the same fixed number of clusters.</p><p>We also compared WaveMAP to other methods suggested by Reviewer 2 (DBSCAN in t-SNE projected space and a GMM on PCA-projected data) using the same data subsetting framework (Figure 4-figure supplement 1). Although both DBSCAN on t-SNE and GMM on PCA produce what appear to be sensible clusters (Figure 4-figure supplement 1A), they exhibit suboptimal properties when AMI is examined vs. data fraction. While WaveMAP starts at a moderate AMI and steadily increases, DBSCAN on t-SNE starts very low and only reaches high levels at large data fraction. GMM on PCA's AMI does not increase with increasing data fraction (Figure 4-figure supplement 1B). In addition, GMM on PCA's AMI scores contract to different values at high data fraction suggesting that this method is switching between different solutions (local minima, perhaps what you were referring to with your comment about WaveMAP; Figure 4-figure supplement 1C, right). Comparison of WaveMAP to these other methods&#8212;which cluster in the embedded space&#8212;suggests that clustering on the high-dimensional graph, before dimensionality reduction for visualization, is key to WaveMAP's stability in terms of both consistent numbers of clusters and cluster membership. The text below is now included in the manuscript and is panel B of Figure 3-Supplement 1.</p><p>&#8220;We also found that WaveMAP was robust to data subsetting (randomly sampled subsets of the full dataset, see Supplementary Information; Tibshirani and Walther, 2012), unlike other clustering approaches (Figure 3-Supplement 1B, green, Figure 4-Supplement 1). We applied WaveMAP to 100 random subsets each from 10% to 90% of the full dataset and compared this to a &#8220;reference&#8221; clustering produced by the procedure on the full dataset. 
WaveMAP was consistent in both cluster number (Figure 3-Supplement 1B, red) and cluster membership (which waveforms were frequently &#8220;co-members" of the same cluster; Figure 3-Supplement 1B, green).&#8221;</p><disp-quote content-type="editor-comment"><p>Reviewer #2:</p><p>1. The results rest a lot on appropriate spike sorting. Given that the probe used will not allow any of the 'triangulation' spike-sorting based methods to work (too spaced out electrodes), it would be useful for the authors to provide a bit more information (maybe in their Methods section) as to how spike-sorting was achieved. Spike selection description I find adequate, but describing the spike-sorting method as "inspection of extracellular waveforms and subsequent offline spike sorting (Plexon Offline Sorter)" doesn't really allow the reader to form a picture of the likelihood of having 'single units' that are not really single. Particularly, nothing is mentioned about thresholding of spikes. I am not suggesting a thorough description of the spike sorting method but some information on the techniques used (within the Offline sorter and/or anything else) and a report on the authors' confidence of the single units produced (and how such confidence was evaluated). I understand that the literature utilising the types of probes used by the authors is also most of the times vague as to what spike-sorting really means, yet in most cases the assignment of spikes to single units does not bare the same significance to the subsequent results as in the case of this work, hence the above argument for some more clarity on the matter.</p></disp-quote><p>We thank the reviewer for this comment and agree that the definition of "spike-sorting" can be murky in many cases. We also recognize that we were perhaps too brief in our description of how the waveforms were selected. But for our experiments, it is straightforward. 
We have now expanded out the text in the methods to describe how exactly we identified our units. Please see the text below that has been included in the methods section of the manuscript. Our confidence in our waveforms comes from a</p><p>mixture of online vigilance and offline spike sorting.</p><p>In the methods section, we included the following subsection:</p><p>&#8220;Identification of single neurons during recordings Our procedure for identifying well-isolated single neurons was as follows: In the case of the single FHC tungsten electrode recordings, we moved the electrode and conservatively adjusted the threshold until we identified a well-demarcated set of waveforms. [&#8230;] Finally, the same offline procedures used for FHC electrodes were repeated for the U-probe recordings.&#8221;</p><p>In the results, we included the following sentences:</p><p>&#8220;We restricted our analysis to well-isolated single neurons identified through a combination of careful online isolation combined with offline spike sorting (see Methods section: Identification of single neurons during recordings). Extracellular waveforms were isolated as single neurons by only accepting waveforms with minimal ISI violations (1.5% &lt; 1.5 ms). This combination of online vigilance, combined with offline analysis, provides us the confidence to label these waveforms as single neurons.&#8221;</p><disp-quote content-type="editor-comment"><p>2. A purported advantage of UMAP is that, in principle, it can deal with a very large number of features (better than t-SNE, which struggles with &gt; 50 features or so). However, the technique is still new and this belief is largely anecdotal. It would be beneficial, to both the neuroscientific community and in general to anyone considering using UMAP, if the authors could make a comparison between using the raw data set in UMAP and using a dimensionality reduced one through PCA giving only the top PCs to UMAP. 
Practically provide some evidence to support your choice of using the raw data set. Also providing an idea of the "complexity" of the data set would be useful (maybe by mentioning the variance explained by different number of principal components after a PCA, even if you do not end up using the primary components generated).</p></disp-quote><p>Thank you for this comment and helping us to better communicate the impact of our</p><p>method. We were not sure if the &#8220;advantages" referred to here had to do with either (or both) (1) UMAP being anecdotally more performant than t-SNE on high-dimensional data or (2) PCA being necessary pre-processing for UMAP to elicit interesting structure in the projected space. We address both potential questions below.</p><p>To the first point, PCA is often suggested for t-SNE as it reduces the dimensionality of the input data and is useful for speeding up the t-SNE algorithms. However, recent improvements in the algorithm, such as FIt-SNE (Linderman et al., 2019), put algorithm speed on par with UMAP even with high input dimensionalities. In our dataset, FIt-SNE computed faster than UMAP with the ambient 48-dimensions: 2.39 &#177; 0.03 s vs. 4.92 &#177; 0.12 s (mean &#177; S.D.) for FIt-SNE and UMAP respectively. Thus, if input dimensionality is specifically considered, both t-SNE and UMAP are equally performant. However, UMAP scales better with increasing output dimensionality (the n components parameter) whereas t-SNE, across its implementations, becomes exponentially slower (McInnes et al., 2018). Although not used in our manuscript, in other settings it might be interesting to explore UMAP dimensions beyond two (UMAP-3, -4, -5, etc). We have edited our manuscript to make clear that both algorithms are similarly fast with respect to input dimension but differ in how their performance scales with output dimension. 
The revised discussion now reads:</p><p>&#8220;At the core of WaveMAP is UMAP which has some advantages over other non-linear</p><p>dimensionality reduction methods that have been applied in this context. Although most</p><p>algorithms offer fast implementations that scale well to large input dimensionalities and</p><p>volumes of data (Linderman et al., 2019; Nolet et al., 2020), UMAP also projects efficiently into arbitrary output dimensionalities while also returning an invertible transform. That is, we can efficiently project new data into any arbitrary dimensional projected space without having to recompute the mapping.&#8221;</p><p>To the second point about UMAP's structure with and without pre-processing with PCA, we find that the structure is largely the same after being passed data reduced onto the first three principal components covering 94% of the variance. This is perhaps unsurprising given the low-dimensionality of our dataset which can be fully-captured in just a few components. We have added the following text and a supplementary figure (Figure 3-Supplement 3) to the manuscript to provide this information.</p><p>&#8220;In addition, common recommendations to apply PCA before non-linear dimensionality reduction were not as important for our waveform dataset, which was fairly low-dimensional (first three PC's explained 94% variance). Projecting waveforms into a 3-dimensional PC-space before WaveMAP produced a clustering very similar to data without this step (Figure 3-Supplement 3).&#8221;</p><disp-quote content-type="editor-comment"><p>3. The choice of the resolution parameter for the clustering algorithm (line 873 and Figure 2B) has rather important consequences in the subsequent analysis. The choice should be better justified.</p><p>a. The authors say they chose the resolution parameter at the elbow of the number of clusters, but that is not the case. 
The elbow point is the immediate next one (resolution of 2 and number of clusters probably 7).</p><p>b. The "number of clusters" value that was the most stable over the resolution is 5 (given resolutions between 3 and 5 if I read the graph properly), which might be another logical assumption for picking the used resolution.</p><p>c. So although the argument that choosing a smaller resolution leads to too many clusters with too few samples in them is a valid one, nonetheless there seems to be no real argument against choosing a resolution that would lead to a smaller number of clusters. Since this clustering impacts the whole of the subsequent analysis, it would be important to understand the authors' reasoning.</p><p>d. It would be interesting for example to see how a smaller number of WaveMAP generated clusters would fare in the learnability test shown by the authors (starting at line 188).</p></disp-quote><p>We thank the reviewer for this question. We realized that our justification for how we chose the number of clusters was very unclear.</p><p>1. Regarding the first point, unlike the Gaussian mixture model where the number of clusters is an actual parameter, number of clusters is not a parameter for WaveMAP. Instead, this is indirectly controlled by the resolution parameter. The number of clusters is an output after choosing a resolution parameter and is not the parameter optimized for.</p><p>2. The objective function for optimizing WaveMAP is the maximization of modularity score. Modularity (the &#8220;connectedness" of a community, see Methods) is a community-wise measure defined as the difference between the weights of the edges within a cluster and the edges incoming from any other node outside of the cluster. Maximizing this value over the whole graph finds communities with high amounts of intra-connectivity and low out-connectivity. 
We chose a resolution parameter that balanced our need for ensuring that we don't fractionate our dataset into clusters with very few samples while also maximizing the modularity score. A resolution parameter of 1.5 allowed us to balance these two goals. Choosing a resolution of 1 would have led to many clusters with small sample sizes and a resolution parameter of 2 would have meant that we were choosing a suboptimal solution in terms of modularity. This is our justification for choosing 1.5 in the manuscript. In the future, further knowledge of ground truth and a rough understanding of the number of candidate cell types identified by electrophysiology might also make it easier to use that as a prior for a resolution parameter.</p><p>3. We recognize that it might be difficult to choose a resolution parameter. For this particular case, we also offer the option of using Ensemble Clustering With Graphs (ECG). This clustering method obviates the need for choosing a resolution parameter. The results of ECG are nearly exact to what we found using a resolution parameter of 1.5. We have now updated the text to better justify our choice of 1.5 as the resolution parameter for this dataset.</p><p>4. Louvain clustering also acts hierarchically with respect to changing resolution parameter: clusters split or merge as the parameter decreases or increases respectively (Figure 3-Supplement 1A). The clusters and their constituent members do not shift dramatically as with other methods such as a GMM (Figure 4-Supplement 1C) when the number of clusters changes. Thus, even though the number of clusters change with resolution, the overall structure does not. We have now provided the following revised description in the text to better describe the justification for choosing 1.5 as our resolution parameter. 
We also added an explicit supplementary section which expands on the text in the results further.</p><p>&#8220;The number of clusters identified by WaveMAP is dependent on the resolution parameter for Louvain clustering. A principled way to choose this resolution parameter is to use the modularity score (a measure of how tightly interconnected the members of a cluster are) as the objective function to maximize. We chose a resolution parameter of 1.5 that maximized modularity score while ensuring that we did not overly fractionate the dataset (n &lt; 20 within a cluster; Figure 3A, B, and columns of Figure 3-Supplement 1A). Additional details are available in the &#8220;Parameter Choice" section of the Supplementary Information.</p><p>Louvain clustering with this resolution parameter of 1.5 identified eight clusters in total</p><p>(Figure 3A). Note, using a slightly higher resolution parameter (2.0), a suboptimal solution in terms of modularity, led to seven clusters (Figure 3-Supplement 1A). The advantage of Louvain clustering is that it is hierarchical and choosing a slightly larger resolution parameter will only merge clusters rather than generating entirely new cluster solutions. Here, we found that the higher resolution parameter merged two of the broad-spiking clusters 6 and 7 while keeping the rest of the clusters largely intact and more importantly, did not lead to material changes in the conclusions of analyses of physiology, decision-related dynamics, or laminar distribution described below. 
Finally, an alternative ensembled version of the Louvain clustering algorithm (ensemble clustering for graphs [ECG]; Poulin and Theberge, 2018), which requires setting no resolution parameter, produced a clustering almost exactly the same as our results (Figure 3-Supplement 1C).</p><p>We also added additional details in the supplementary information to guide the reader further:</p><p>&#8220;Parameter Choice: Louvain clustering (Blondel et al., 2008) requires the specification of a resolution parameter, t, which controls the &#8220;characteristic scale&#8221; by which network communities are identified; the larger this parameter, the fewer the number of clusters (communities) detected and vice versa (Lambiotte, 2007).</p><p>We selected a resolution parameter based on two factors. The most important factor was modularity score. Modularity (the &#8220;connectedness&#8221; of a community, see Methods) is a community-wise measure defined as the difference between the weights of the edges within a cluster and the edges incoming from any other node outside of the cluster. Maximizing this value over the whole graph finds communities with high amounts of intra-connectivity and low out-connectivity. The second factor we considered was the sizes of the resulting clusters after choosing a resolution parameter. We did not want clusters with too few members (n &lt; 20) which would be statistically difficult to interpret. The regions with the highest modularity score were at a resolution parameter of 1 and decreased from there onwards. However, choosing a resolution parameter of 1 led to a large number of clusters which often had very few members (Figure 3-Supplement 1A, leftmost column) making downstream statistical comparisons underpowered. We therefore chose t to be 1.5 which resulted in the next best average modularity score of 0.761 &#177; 0.004 (mean &#177; S.D.) and an average of 8.29 &#177; 0.84 (mean &#177; S.D.) 
clusters across 25 random data permutations (Figure 3B). In this manner, we found a set of waveform clusters that balanced the diversity found by UMAP and statistical interpretability.&#8221;</p><p>We also agree it is important to see if our results only hold for a particular choice of resolution parameter. We find that across many values of resolution parameter UMAP is better than GMM. In the original manuscript, we performed this analysis for 4 and 8 clusters and found that WaveMAP was better than a GMM. Now, at your and R1's encouragement, we have performed it for all values from 2 to 16 and show that in every case our solution is as good if not better than GMMs (Figure 4-Supplement 2). We also changed both the n neighbors and resolution parameters from UMAP and Louvain respectively to yield a large range of cluster numbers; at the same time, we changed the number of components for a Gaussian mixture model (the n components parameter) across the same range. We find that, across every cluster number, a gradient boosted decision tree classifier trained on WaveMAP clusters had better performance than the clusters produced by a GMM applied to waveform features. We have now included a new supplementary figure (Figure 4-Supplement 2) and the text below to address your concern.</p><p>&#8220;In fact, across all cluster numbers (n components from 2 to 16), a classifier tuned for the GMM performed more poorly on the GMM labels than a WaveMAP projection with the same number of clusters (Figure 4-Supplement 2E, in red). Tuning WaveMAP parameters that induce different cluster numbers, whether n neighbors (in dark blue) or resolution (in light blue), had little effect on classifier performance (Figure 4-Supplement 2E, in blues). WaveMAP yielded mappings that were more generalizable than a GMM on features across every number of clusters and both parameters investigated.&#8221;</p><disp-quote content-type="editor-comment"><p>4. Comparison to t-SNE. 
This comment has been prompted by the phrase "While providing visualization, these methods are difficult to cluster upon because they return a different mapping on every initialization." (line 546). The authors provide reference 155 that actually directly contradicts this statement. Although the other comments about t-SNE and UMAP are valid (reverse mapping, and use of labels in training), I believe the first comment requires either removal or better further exploration. It would be very informative if the authors did a t-SNE embedding (on the data after PCA) and then used a DBSCAN to classify the cells. This would be rather straight forward to do at their data set size, even multiple times for different initialisations (but see Dimitriadis et al., 2018a for a GPU accelerated t-SNE algorithm and also for t-SNE and DBSCAN used in spike-sorting). That result can then also be contrasted to the WaveMAP and GMM methods using their random forest classifier. Even if the results from the t-SNE / DBSCAN clustering are comparative to WaveMAP the reverse mapping capability of UMAP is definitely a strength that is very nicely utilised in this work and which any other non-linear dimensionality reduction technique doesn't have. As a final point, in the comparison section the authors could also add that t-SNE is way slower than UMAP and this speed difference starts to be very obvious (minutes vs days) in sample sizes in the hundreds of thousands to millions (i.e. the regime of spike collection that high density probes collect Dimitriadis et al. 2018b and Steinmetz et al., 2018).</p></disp-quote><p>We thank the reviewer for pointing out this inaccuracy and we have removed this statement. 
We have now revised the Discussion to instead focus more on three advantages that UMAP has over other non-linear dimensionality reduction methods, namely (1) it is highly-performant and approximately invertible with respect to output dimensionality, (2) it returns an invertible transform for cross-validation, and (3) it supports supervised and semi-supervised learning.</p><p>&#8220;At the core of WaveMAP is UMAP which has some advantages over other non-linear dimensionality reduction methods that have been applied in this context. Although most</p><p>algorithms offer fast implementations that scale well to large input dimensionalities and</p><p>volumes of data (Linderman et al., 2019; Nolet et al., 2020), UMAP also projects efficiently into arbitrary output dimensionalities while also returning an invertible transform. That is, we can efficiently project new data into any arbitrary dimensional projected space without having to recompute the mapping.</p><p>These properties provide three advantages over other non-linear dimensionality reduction approaches: First, our method is stable in the sense that it produces a consistent number of clusters and each cluster has the same members across random subsamples (Figure 3-Supplement 1B). Clustering in the high-dimensional space rather than in the projected space lends stability to our approach. Second, it allows exploration of any region of the projected space no matter the intuited latent dimensionality; this yields an intuitive understanding of how UMAP non-linearly transforms the data, which might be related to underlying biological phenomena. Thus, UMAP allows WaveMAP to go beyond a &#8220;discriminative model&#8221; typical of other clustering techniques and function as a &#8220;generative model&#8221; with which to make predictions. Third, it enables cross-validation of a classifier trained on cluster labels, impossible with methods that don't return an invertible transform. 
To cross-validate unsupervised methods, unprocessed test data must be passed into a transform computed only on training data and evaluated with some loss function (Moscovich and Rosset, 2019). This is only possible if an invertible transform is admitted by the method of dimensionality reduction as in UMAP.&#8221;</p><p>We also agree that it would be useful to examine how our method compares against DBSCAN clustering applied to a t-SNE projection which has been useful for clustering neural activity (Dimitriadis et al., 2018; Mahallati et al., 2019). However, we would first like to draw a distinction between the clustering of single spikes for purposes of spike sorting and the clustering of averaged spikes from single units for putative type classification. In the former, spikes seem to clearly separate into clusters in the projected space of t-SNE or UMAP (see Dimitriadis et al., 2018 and Sedaghat-Nejad et al., 2021); in the latter (our work), averaged single unit spikes seem to form continuums that cannot be easily delineated as clusters in the projected space. This seems to be a key difference explaining how clustering in the projected space succeeds for spike sorting but fails (as we will show next) for clustering in the projected space for cell type classification.</p><p>Here, we compare WaveMAP against DBSCAN on a t-SNE projection. To make this comparison, we applied each method to 100 random subsamples, of various proportions of the full dataset, and calculated the adjusted mutual information score (AMI) against a &#8220;reference" clustering obtained from applying the respective method to the full dataset. For a detailed explanation of AMI, see note at the end of the document. 
We find that while DBSCAN on t-SNE produces parcellations of waveform structure very similar to WaveMAP when examining a single clustering (Figure 4&#8212;figure supplement 1A) of the full dataset, this method produces very variable clusterings on random subsets of the full dataset (Figure 4&#8212;figure supplement 1B).</p><p>WaveMAP begins with moderate AMI scores at low data fractions and increases steadily. DBSCAN on t-SNE begins at very low AMI scores, increases quickly with data fraction, then matches WaveMAP at high percentages of the full dataset. However, DBSCAN on t-SNE exhibits highly variable AMI scores as individual analyses are examined especially at lower data fractions (Figure 4&#8212;figure supplement 1C, left). So although DBSCAN on t-SNE's variability disappears at 90% of the dataset (Figure 4&#8212;figure supplement 1C, right), this suggests that this technique might generalize less well out-of-dataset or else requires more data to converge than is provided in our data fractions.</p><p>Based on these results, we have now added the following text to the manuscript to assuage readers who might have the same question:</p><p>&#8220;We also found that WaveMAP was robust to data subsetting (randomly sampled subsets of the full dataset, see Supplementary Information Tibshirani and Walther, 2012), unlike other clustering approaches (Figure 3-Supplement 1B, green, Figure 4-Supplement 1). We applied WaveMAP to 100 random subsets each from 10% to 90% of the full dataset and compared this to a &#8220;reference" clustering produced by the procedure on the full dataset. 
WaveMAP was consistent in both cluster number (Figure 3-Supplement 1B, red) and cluster membership (which waveforms were frequently &#8220;co-members" of the same cluster; Figure 3-Supplement 1B, green).&#8221;</p><p>Thus in conclusion, WaveMAP seems to perform better in our particular use case in parcellating somewhat continuous structure while methods like DBSCAN on t-SNE are successful for other use cases such as spike sorting where clearer clusterings exist after non-linear dimensionality reduction. We reiterate that we think this has more to do with clustering on the network graph vs. in the projected space and this doesn't have to do with the choice of particular dimensionality reduction whether that be UMAP or t-SNE.</p><p>To address the final point on comparing the algorithmic performance of t-SNE vs. UMAP, we've revised the manuscript to avoid comparisons. We actually found that, given the low-dimensionality and low number of data points in our dataset, that t-SNE (with FIt-SNE) was faster than UMAP. However, there also exists an incredibly fast GPU implementation of UMAP through RAPIDS's cuML library (Nolet et al., 2020) but benchmarks were generated with an enterprise GPU cluster (NVIDIA DGX-1) that most researchers would not have access to. Thus we eschew this point as the advantages of one method or the other depend intrinsically upon the properties of the dataset and the means of the investigators.</p><disp-quote content-type="editor-comment"><p>5. Number of waveform features used to classify the cells. The authors compare their WaveMAP to a seemingly standard technique in the macaque literature (or maybe in the literature that uses sparse electrode probes), that uses 3 features of the waveform to classify the cells. This is not the way things are done in most of the rodent electrophysiology (or works that use denser probes or tetrodes). The standard technique is to PCA each spike form and use the first 3 components per electrode. 
In the case of this work that would lead to the use of the same number of features, but (probably) much larger retention of information. See Harris et al., 2000 (and references therein) for how the PCA of the waveforms is used in tetrode recordings to do spike-sorting. More recent spike-sorting algorithms use template matching of the waveform. For instance, Kilosort (Pachitariu et al., 2016) omits spike detection and PCA, which can miss useful information. Instead, it relies on identifying template waveforms and their timing properties, in order to assign spikes (in the entirety of their waveform) to different cells. Also comparing the WaveMAP process to the clustering results (following the used Generalised Gaussian Models method, or other peak-density detection methods) of this feature set would be both more informative and more valid for the larger electrophysiology community.</p></disp-quote><p>We apologize for not making a clearer distinction but we only document how our method performs in the context of classifying waveforms into candidate cell types rather than the sorting of individual spikes to distinguish single units (spike sorting). For spike sorting, researchers definitely use the first 3 principal components. Indeed, we did the same when drawing our cluster boundaries to identify isolated single neurons. However, what WaveMAP is focused on is separating cell classes which is often done after spike sorting.</p><p>We know of many rodent papers that use the feature based method on average waveforms to drive inference about the role of putative excitatory and inhibitory neurons in neural circuits (see examples from Yu et al., (2019), Robbins et al., (2013), Bartho et al., (2004), Niell and Stryker (2008), Bruno and Simons (2002), Stringer et al., (2016), and review by Peyrache and Destexhe (2019)). 
We don't think using waveform features is something unique to the macaque literature and many of these papers often deploy tetrode based electrophysiological methods to record neural activity. Having said that, perhaps there is opportunity for future work to eschew separate methods for sorting and classification and combine them in a single step.</p><p>Nevertheless, we hear your comment about the usefulness of comparing WaveMAP to a GMM based on the first three PCs (instead of features). We compared WaveMAP directly against a GMM on PCA in terms of AMI (Figure S3). We set a &#8220;ground truth&#8221; by applying the method to the full dataset and generated an AMI for each data fraction by averaging the individual AMI scores for 100 random subsets at various data fractions. While the GMM on PCA formed what seems to be sensible clusters, these were fairly variable and this did not improve even with increasing data fraction. Examining each run individually, we can see that at 40% of the full dataset, the AMI has a large variance but at 90% of the full dataset, this variance is qualitatively different: instead of forming a continuum of AMI values over a range, the AMI contracts towards different locations. This seems to suggest that even at high data fractions, GMM on PCA arrives at several local minima in the solution space, unlike WaveMAP which aggregates at one specific AMI score.</p><p>We have included Figure S3 in the manuscript and included the following text:</p><p>We also found that WaveMAP was robust to data subsetting (randomly sampled subsets of the full dataset, see Supplementary Information (Tibshirani and Walther, 2012)), unlike other clustering approaches (Figure 3-Supplement 1B, green, Figure 4-Supplement 1). We applied WaveMAP to 100 random subsets each from 10% to 90% of the full dataset and compared this to a &#8220;reference&#8221; clustering produced by the procedure on the full dataset. 
WaveMAP was consistent in both cluster number (Figure 3-Supplement 1B, red) and cluster membership (which waveforms were frequently &#8220;co-members&#8221; of the same cluster; Figure 3-Supplement 1B, green).&#8221;</p><p>To answer your second comment, as to whether WaveMAP would do better than template based methods, we again have to emphasize that this paper is more focused on the identification of cell classes after spike sorting. While we have not explicitly addressed the use of WaveMAP for spike-sorting, evidence suggests the method of UMAP in conjunction with graph clustering would also be effective in separating out units where the PCA approach is insufficient. We would like to point the reviewer to Sedaghat-Nejad et al., (2021) in which they use UMAP in their P-sort spike sorter and find that it outperforms template sorters (Kilosort2) or peak-density centroid methods (SpyKING CIRCUS) especially when more complicated waveforms are present as in cerebellar recordings. We should also mention that this group is interested in incorporating the innovations from our WaveMAP paper into their workflow (personal communication).</p><p>References</p><p>Ali M, Jones MW, Xie X, Williams M (2019) TimeCluster: dimension reduction applied to</p><p>temporal data for visual analytics. The Visual Computer 35:1013&#8211;1026.</p><p>Bartho P, Hirase H, Monconduit L, Zugaro M, Harris KD, Buzsaki G (2004) Characterization of Neocortical Principal Cells and Interneurons by Network Interactions and Extracellular Features. Journal of Neurophysiology 92:600&#8211;608.</p><p>Becht E, McInnes L, Healy J, Dutertre CA, Kwok IWH, Ng LG, Ginhoux F, Newell EW (2018)</p><p>Dimensionality reduction for visualizing single-cell data using UMAP. Nature</p><p>Biotechnology 37:38&#8211;44.</p><p>Blondel VD, Guillaume JL, Lambiotte R, Lefebvre E (2008) Fast unfolding of communities in large networks. 
Journal of Statistical Mechanics: Theory and Experiment 2008:P10008.</p><p>Bruno RM, Simons DJ (2002) Feedforward Mechanisms of Excitatory and Inhibitory Cortical Receptive Fields. Journal of Neuroscience 22:10966{10975.</p><p>Deligkaris K, Bullmann T, Frey U (2016) Extracellularly Recorded Somatic and Neuritic Signal Shapes and Classification Algorithms for High-Density Microelectrode Array Electrophysiology. Frontiers in Neuroscience 10:421.</p><p>Dimitriadis G, Neto JP, Aarts A, Alexandru A, Ballini M, Battaglia F, Calcaterra L, Chen S,</p><p>David F, Fiath R, Fraz~ao J, Geerts JP, Gentet LJ, Helleputte NV, Holzhammer T, Hoof Cv,</p><p>Horvath D, Lopes G, Lopez CM, Maris E, Marques-Smith A, Marton G, McNaughton BL,</p><p>Meszena D, Mitra S, Musa S, Neves H, Nogueira J, Orban GA, Pothof F, Putzeys J, Raducanu BC, Ruther P, Schroeder T, Singer W, Steinmetz NA, Tiesinga P, Ulbert I, Wang S, Welkenhuysen M, Kampff AR (2020) Why not record from every electrode with a CMOS scanning probe? bioRxiv p. 275818.</p><p>Dimitriadis G, Neto JP, Kampff AR (2018) t-SNE Visualization of Large-Scale Neural</p><p>Recordings. Neural Computation 30:1750{1774.</p><p>Gold C, Henze DA, Koch C, Buzsaki G (2006) On the Origin of the Extracellular Action</p><p>Potential Waveform: A Modeling Study. 
Journal of Neurophysiology 95:3113{3128.</p><p>Gouwens NW, Sorensen SA, Baftizadeh F, Budzillo A, Lee BR, Jarsky T, Alfiler L, Baker K,</p><p>Barkan E, Berry K, Bertagnolli D, Bickley K, Bomben J, Braun T, Brouner K, Casper T,</p><p>Crichton K, Daigle TL, Dalley R, de Frates RA, Dee N, Desta T, Lee SD, Dotson N, Egdorf T,</p><p>Ellingwood L, Enstrom R, Esposito L, Farrell C, Feng D, Fong O, Gala R, Gamlin C, Gary A,</p><p>Glandon A, Goldy J, Gorham M, Graybuck L, Gu H, Hadley K, Hawrylycz MJ, Henry AM,</p><p>Hill D, Hupp M, Kebede S, Kim TK, Kim L, Kroll M, Lee C, Link KE, Mallory M, Mann R,</p><p>Maxwell M, McGraw M, McMillen D, Mukora A, Ng L, Ng L, Ngo K, Nicovich PR, Oldre A,</p><p>Park D, Peng H, Penn O, Pham T, Pom A, Popovic Z, Potekhina L, Rajanbabu R, Ransford S, Reid D, Rimorin C, Robertson M, Ronellenfitch K, Ruiz A, Sandman D, Smith K, Sulc J,</p><p>Sunkin SM, Szafer A, Tieu M, Torkelson A, Trinh J, Tung H, Wakeman W, Ward K, Williams G, Zhou Z, Ting JT, Arkhipov A, Sumbul U, Lein ES, Koch C, Yao Z, Tasic B, Berg J, Murphy GJ, Zeng H (2020) Integrated Morphoelectric and Transcriptomic Classification of Cortical GABAergic Cells. Cell 183:935{953.e19.</p><p>Harris KD, Henze DA, Csicsvari J, Hirase H, Buzsaki G (2000) Accuracy of Tetrode Spike</p><p>Separation as Determined by Simultaneous Intracellular and Extracellular Measurements. Journal of Neurophysiology 84:401{414.</p><p>Jia X, Siegle JH, Bennett C, Gale SD, Denman DJ, Koch C, Olsen SR (2019) High-density</p><p>extracellular probes reveal dendritic backpropagation and facilitate neuron classification. Journal of Neurophysiology 121:1831{1847.</p><p>Lambiotte R (2007) Finding communities at different resolutions in large networks.</p><p>Linderman GC, Rachh M, Hoskins JG, Steinerberger S, Kluger Y (2019) Fast interpolation-based t-SNE for improved visualization of single-cell RNA-seq data. Nature Methods 16:243{245.</p><p>Lundberg S, Lee SI (2017) A Unified Approach to Interpreting Model Predictions. 
arXiv.</p><p>Mahallati S, Bezdek JC, Popovic MR, Valiante TA (2019) Cluster tendency assessment in</p><p>neuronal spike data. PLOS ONE 14:e0224547.</p><p>McInnes L, Healy J, Melville J (2018) Umap: Uniform manifold approximation and projection for dimension reduction. arXiv.</p><p>Moscovich A, Rosset S (2019) On the cross-validation bias due to unsupervised pre-processing. arXiv.</p><p>Niell CM, Stryker MP (2008) Highly Selective Receptive Fields in Mouse Visual Cortex. The Journal of Neuroscience 28:7520{7536.</p><p>Nolet CJ, Lafargue V, Raff E, Nanditale T, Oates T, Zedlewski J, Patterson J (2020) Bringing UMAP Closer to the Speed of Light with GPU Acceleration. arXiv.</p><p>Pachitariu M, Steinmetz N, Kadir S, Carandini M, D. HK (2016) Kilosort: realtime spike-sorting for extracellular electrophysiology with hundreds of channels. bioRxiv p. 061481.</p><p>Paulk AC, Kffir Y, Khanna A, Mustroph M, Trautmann EM, Soper DJ, Stavisky SD,</p><p>Welkenhuysen M, Dutta B, Shenoy KV, Hochberg LR, Richardson M, Williams ZM, Cash SS (2021) Large-scale neural recordings with single-cell resolution in human cortex using high-density neuropixels probes. bioRxiv.</p><p>Peyrache A, Destexhe A (2019) Electrophysiological monitoring of inhibition in mammalian species, from rodents to humans. Neurobiology of Disease 130:104500.</p><p>Poulin V, Theberge F (2018) Ensemble Clustering for Graphs. arXiv.</p><p>Quirk MC, Wilson MA (1999) Interaction between spike waveform classification and temporal sequence detection. Journal of Neuroscience Methods 94:41{52.</p><p>Robbins AA, Fox SE, Holmes GL, Scott RC, Barry JM (2013) Short duration waveforms</p><p>recorded extracellularly from freely moving rats are representative of axonal activity. Frontiers in Neural Circuits 7:181.</p><p>Romano S, Vinh NX, Bailey J, Verspoor K (2016) Adjusting for chance clustering comparison measures. 
The Journal of Machine Learning Research 17:4635{4666.</p><p>12-02-2021-RA-<italic>eLife</italic>-67490 27 July 23, 2021</p><p>Rosenberg A, Hirschberg J (2007) V-measure: A conditional entropy-based external cluster evaluation measure In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL), pp. 410{420, Prague, Czech Republic. Association for Computational Linguistics.</p><p>Sedaghat-Nejad E, Fakharian MA, &#960; J, Hage P, Kojima Y, Soetedjo R, Ohmae S, Medina JF,</p><p>Shadmehr R (2021) P-sort: an open-source software for cerebellar neurophysiology. bioRxiv.</p><p>Steinmetz N, Zatka-Haas P, Carandini M, Harris K (2018) Distributed correlates of</p><p>visually-guided behavior across the mouse brain. bioRxiv p. 474437.</p><p>Stringer C, Pachitariu M, Steinmetz NA, Okun M, Bartho P, Harris KD, Sahani M, Lesica NA (2016) Inhibitory control of correlated intrinsic variability in cortical networks. <italic>eLife</italic> 5:e19695.</p><p>Tibshirani R, Walther G (2012) Cluster Validation by Prediction Strength. Journal of</p><p>Computational and Graphical Statistics 14:511{528.</p><p>Timme NM, Lapish C (2018) A Tutorial for Information Theory in Neuroscience.</p><p>eNeuro 5:ENEURO.0052{18.2018.</p><p>Yu J, Hu H, Agmon A, Svoboda K (2019) Recruitment of GABAergic Interneurons in the Barrel Cortex during Active Tactile Behavior. Neuron 104:412{427.e4.</p></body></sub-article></article>