<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.1 20151215//EN" "JATS-archivearticle1.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="1.1"><front><journal-meta><journal-id journal-id-type="nlm-ta">elife</journal-id><journal-id journal-id-type="publisher-id">eLife</journal-id><journal-title-group><journal-title>eLife</journal-title></journal-title-group><issn pub-type="epub" publication-format="electronic">2050-084X</issn><publisher><publisher-name>eLife Sciences Publications, Ltd</publisher-name></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">65751</article-id><article-id pub-id-type="doi">10.7554/eLife.65751</article-id><article-categories><subj-group subj-group-type="display-channel"><subject>Tools and Resources</subject></subj-group><subj-group subj-group-type="heading"><subject>Neuroscience</subject></subj-group></article-categories><title-group><article-title>Visualizing anatomically registered data with brainrender</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes" id="author-219355"><name><surname>Claudi</surname><given-names>Federico</given-names></name><email>federico.claudi.17@ucl.ac.uk</email><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="con1"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-219356"><name><surname>Tyson</surname><given-names>Adam L</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0003-3225-1130</contrib-id><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="con2"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-219358"><name><surname>Petrucco</surname><given-names>Luigi</given-names></name><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="con3"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-219354"><name><surname>Margrie</surname><given-names>Troy W</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">http://orcid.org/0000-0002-5526-4578</contrib-id><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="other" rid="fund1"/><xref ref-type="other" rid="fund2"/><xref ref-type="other" rid="fund4"/><xref ref-type="fn" rid="con4"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" id="author-37018"><name><surname>Portugues</surname><given-names>Ruben</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">http://orcid.org/0000-0002-1495-9314</contrib-id><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="other" rid="fund5"/><xref ref-type="fn" rid="con5"/><xref ref-type="fn" rid="conf1"/></contrib><contrib contrib-type="author" corresp="yes" id="author-31485"><name><surname>Branco</surname><given-names>Tiago</given-names></name><contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0001-5087-3465</contrib-id><email>t.branco@ucl.ac.uk</email><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="other" rid="fund1"/><xref ref-type="other" rid="fund3"/><xref ref-type="other" rid="fund4"/><xref ref-type="fn" rid="con6"/><xref ref-type="fn" rid="conf1"/></contrib><aff id="aff1"><label>1</label><institution>UCL Sainsbury Wellcome Centre</institution><addr-line><named-content content-type="city">London</named-content></addr-line><country>United Kingdom</country></aff><aff 
id="aff2"><label>2</label><institution>Institute of Neuroscience, Technical University of Munich</institution><addr-line><named-content content-type="city">Munich</named-content></addr-line><country>Germany</country></aff><aff id="aff3"><label>3</label><institution>Max Planck Institute of Neurobiology, Research Group of Sensorimotor Control</institution><addr-line><named-content content-type="city">Martinsried</named-content></addr-line><country>Germany</country></aff><aff id="aff4"><label>4</label><institution>Munich Cluster for Systems Neurology (SyNergy)</institution><addr-line><named-content content-type="city">Munich</named-content></addr-line><country>Germany</country></aff></contrib-group><contrib-group content-type="section"><contrib contrib-type="editor"><name><surname>Mathis</surname><given-names>Mackenzie W</given-names></name><role>Reviewing Editor</role><aff><institution>EPFL</institution><country>Switzerland</country></aff></contrib><contrib contrib-type="senior_editor"><name><surname>Wassum</surname><given-names>Kate M</given-names></name><role>Senior Editor</role><aff><institution>University of California, Los Angeles</institution><country>United States</country></aff></contrib></contrib-group><pub-date date-type="publication" publication-format="electronic"><day>19</day><month>03</month><year>2021</year></pub-date><pub-date pub-type="collection"><year>2021</year></pub-date><volume>10</volume><elocation-id>e65751</elocation-id><history><date date-type="received" iso-8601-date="2020-12-15"><day>15</day><month>12</month><year>2020</year></date><date date-type="accepted" iso-8601-date="2021-03-17"><day>17</day><month>03</month><year>2021</year></date></history><permissions><copyright-statement>&#169; 2021, Claudi et al</copyright-statement><copyright-year>2021</copyright-year><copyright-holder>Claudi et al</copyright-holder><ali:free_to_read/><license xlink:href="http://creativecommons.org/licenses/by/4.0/"><ali:license_ref>http://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This article is distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License</ext-link>, which permits unrestricted use and redistribution provided that the original author and source are credited.</license-p></license></permissions><self-uri content-type="pdf" xlink:href="elife-65751-v3.pdf"/><abstract><p>Three-dimensional (3D) digital brain atlases and high-throughput brain-wide imaging techniques generate large multidimensional datasets that can be registered to a common reference frame. Generating insights from such datasets depends critically on visualization and interactive data exploration, but this a challenging task. Currently available software is dedicated to single atlases, model species or data types, and generating 3D renderings that merge anatomically registered data from diverse sources requires extensive development and programming skills. Here, we present brainrender: an open-source Python package for interactive visualization of multidimensional datasets registered to brain atlases. Brainrender facilitates the creation of complex renderings with different data types in the same visualization and enables seamless use of different atlas sources. High-quality visualizations can be used interactively and exported as high-resolution figures and animated videos. 
By facilitating the visualization of anatomically registered data, brainrender should accelerate the analysis, interpretation, and dissemination of brain-wide multidimensional data.</p></abstract><kwd-group kwd-group-type="author-keywords"><kwd>software</kwd><kwd>data visualization</kwd><kwd>open source</kwd><kwd>anatomy</kwd></kwd-group><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd>None</kwd></kwd-group><funding-group><award-group id="fund1"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/501100000324</institution-id><institution>Gatsby Charitable Foundation</institution></institution-wrap></funding-source><award-id>GAT3361</award-id><principal-award-recipient><name><surname>Margrie</surname><given-names>Troy W</given-names></name><name><surname>Branco</surname><given-names>Tiago</given-names></name></principal-award-recipient></award-group><award-group id="fund2"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100004440</institution-id><institution>Wellcome</institution></institution-wrap></funding-source><award-id>214333/Z/18/Z</award-id><principal-award-recipient><name><surname>Margrie</surname><given-names>Troy W</given-names></name></principal-award-recipient></award-group><award-group id="fund3"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100004440</institution-id><institution>Wellcome</institution></institution-wrap></funding-source><award-id>214352/Z/18/Z</award-id><principal-award-recipient><name><surname>Branco</surname><given-names>Tiago</given-names></name></principal-award-recipient></award-group><award-group id="fund4"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/100004440</institution-id><institution>Wellcome</institution></institution-wrap></funding-source><award-id>090843/F/09/Z</award-id><principal-award-recipient><name><surname>Margrie</surname><given-names>Troy W</given-names></name><name><surname>Branco</surname><given-names>Tiago</given-names></name></principal-award-recipient></award-group><award-group id="fund5"><funding-source><institution-wrap><institution-id institution-id-type="FundRef">http://dx.doi.org/10.13039/501100001659</institution-id><institution>Deutsche Forschungsgemeinschaft</institution></institution-wrap></funding-source><award-id>390857198</award-id><principal-award-recipient><name><surname>Portugues</surname><given-names>Ruben</given-names></name></principal-award-recipient></award-group><funding-statement>The funders had no role in study design, data collection and interpretation, or the decision to submit the work for publication.</funding-statement></funding-group><custom-meta-group><custom-meta specific-use="meta-only"><meta-name>Author impact statement</meta-name><meta-value>Brainrender is open-source and user-friendly software for combining any type of anatomically registered data into custom interactive 3D renderings.</meta-value></custom-meta></custom-meta-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Understanding how nervous systems generate behavior benefits from gathering multidimensional data from different individual animals. These data range from neural activity recordings and anatomical connectivity to cellular and subcellular information such as morphology and gene expression profiles. 
These different types of data should ideally all be in register so that, for example, neural activity in one brain region can be interpreted in light of the connectivity of that region or the cell types it contains. Such registration, however, is challenging. Often it is not technically feasible to obtain multidimensional data in a single experiment, and registration to a common reference frame must be performed post&#160;hoc. Even for the same experiment type, registration is necessary to allow comparisons across individual animals (<xref ref-type="bibr" rid="bib25">Simmons and Swanson, 2009</xref>).</p><p>While different types of references can in principle be used, neuroanatomical location is a natural and the most commonly used reference frame (<xref ref-type="bibr" rid="bib6">Chon et al., 2019</xref>; <xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>; <xref ref-type="bibr" rid="bib2">Arganda-Carreras et al., 2018</xref>; <xref ref-type="bibr" rid="bib15">Kunst et al., 2019</xref>). In recent years, several high-resolution three-dimensional&#160;(3D) digital brain atlases have been generated for model species commonly used in neuroscience (<xref ref-type="bibr" rid="bib32">Wang et al., 2020</xref>; <xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>; <xref ref-type="bibr" rid="bib2">Arganda-Carreras et al., 2018</xref>; <xref ref-type="bibr" rid="bib15">Kunst et al., 2019</xref>). These atlases provide a framework for registering different types of data across macro- and microscopic scales. A key output of this process is the visualization of all datasets in register. Given the intrinsically 3D geometry of brain structures and individual neurons, 3D renderings are more readily understandable and can provide more information than two-dimensional images. Exploring interactive 3D visualizations of the brain gives an overview of the relationship between datasets and brain regions and helps generate intuitive insights about these relationships. This is particularly important for large-scale datasets such as the ones generated by open-science projects like MouseLight (<xref ref-type="bibr" rid="bib33">Winnubst et al., 2019</xref>) and the Allen Mouse Connectome (<xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>). In addition, high-quality 3D visualizations facilitate the communication of experimental results registered to brain anatomy.</p><p>Generating custom 3D visualizations of atlas data requires programmatic access to the atlas. While some of the recently developed atlases provide an API (Application Programming Interface) for accessing atlas data (<xref ref-type="bibr" rid="bib32">Wang et al., 2020</xref>; <xref ref-type="bibr" rid="bib15">Kunst et al., 2019</xref>), rendering these data in 3D remains a demanding and time-consuming task that requires significant programming skills. Moreover, visualization of user-generated data registered onto the atlas requires an interface between the user data and the atlas data, which in turn demands advanced programming knowledge and extensive development. 
There is therefore the need for software that can simplify the process of visualizing 3D anatomical data from available atlases and from new experimental datasets.</p><p>Existing software packages such as cocoframer (<xref ref-type="bibr" rid="bib16">Lein et al., 2007</xref>), BrainMesh (<xref ref-type="bibr" rid="bib34">Yaoyao, 2020</xref>), and SHARPTRACK (<xref ref-type="bibr" rid="bib24">Shamash et al., 2018</xref>) provide some functionality for 3D rendering of anatomical data. These packages, however, are only compatible with a single atlas and cannot be used to render data from different atlases or different animal species. Achieving this requires adapting the existing software to different atlas datasets or developing new dedicated software altogether, at the cost of significant and often duplicated effort. An important limitation of the currently available software is that it frequently does not support rendering of non-atlas data, such as data from publicly available datasets (e.g. MouseLight) or produced by individual laboratories. This capability is essential for easily mapping newly generated data onto brain anatomy at high&#160;resolution and for producing visualizations of multidimensional datasets. More advanced software such as natverse (<xref ref-type="bibr" rid="bib5">Bates et al., 2020</xref>) offers extensive data visualization and analysis functionality, but it is currently mostly restricted to data from the <italic>Drosophila</italic> brain. Simple Neurite Tracer (<xref ref-type="bibr" rid="bib3">Arshadi et al., 2020</xref>), an ImageJ-based tool, can render neuronal morphological data from public and user-generated datasets and is compatible with several reference atlases. However, this software does not support visualization of data other than neuronal morphological reconstructions, nor can it be easily adapted to work with different or new atlases beyond the ones already supported. Finally, software such as MagellanMapper (<xref ref-type="bibr" rid="bib35">Young et al., 2020</xref>) can be used to visualize and analyze large 3D brain imaging datasets, but the visualization is restricted to one data item (i.e. images from one individual brain). It is therefore not possible to combine data from different sources into a single visualization. Ideally, rendering software should work with 3D mesh data instead of 3D voxel image data to allow the creation of high-quality renderings and facilitate the integration of data from different sources.</p><p>An additional consideration is that existing software tools for programmatic neuroanatomical renderings have been developed in programming languages such as R and MATLAB, and there is currently no available alternative in Python. The popularity of Python within the neuroscientific community has grown tremendously in recent years (<xref ref-type="bibr" rid="bib19">Muller et al., 2015</xref>). Building on Python&#8217;s simple syntax and free, high-quality data processing and analysis packages, several open-source tools directly aimed at neuroscientists have been written in Python and are increasingly used (e.g., <xref ref-type="bibr" rid="bib18">Mathis et al., 2018</xref>; <xref ref-type="bibr" rid="bib22">Pachitariu et al., 2017</xref>; <xref ref-type="bibr" rid="bib31">Tyson and Rousseau, 2020b</xref>). 
Developing Python-based software for the universal generation of 3D renderings of anatomically registered data can therefore take advantage of the increasing strength and depth of the Python neuroscience community for testing and further development.</p><p>For these reasons, we have developed brainrender: an open-source Python package for creating high-resolution, interactive 3D renderings of anatomically registered data. Brainrender is written in Python and integrated with BrainGlobe&#8217;s AtlasAPI (<xref ref-type="bibr" rid="bib7">Claudi et al., 2020</xref>) to interface natively with different atlases without the need for modification. Brainrender supports the visualization of data acquired with different techniques and at different scales. Data from multiple sources can be combined in a single rendering to produce rich and informative visualizations of multidimensional data. Brainrender can also be used to create high-resolution, publication-ready images and videos (see <xref ref-type="bibr" rid="bib31">Tyson and Rousseau, 2020b</xref>; <xref ref-type="bibr" rid="bib1">Adkins et al., 2020</xref>), as well as interactive online visualizations to facilitate the dissemination of anatomically registered data. Finally, using brainrender requires minimal programming skills, which should accelerate the adoption of this new software by the research community. All brainrender code is available at the GitHub repository together with extensive online documentation and examples.</p></sec><sec id="s2" sec-type="results"><title>Results</title><sec id="s2-1"><title>Design principles and implementation</title><p>A core design goal for brainrender was to create visualization software compatible with any reference atlas, thus providing a generic and flexible tool (<xref ref-type="fig" rid="fig1">Figure 1A</xref>). To achieve this goal, brainrender has been developed as part of BrainGlobe&#8217;s computational neuroanatomy software suite. In particular, we integrated brainrender directly with BrainGlobe&#8217;s AtlasAPI (<xref ref-type="bibr" rid="bib7">Claudi et al., 2020</xref>). The AtlasAPI can download and access atlas data from several supported atlases in a unified format. Brainrender uses the AtlasAPI to access 3D mesh data from individual brain regions as well as metadata about the hierarchical organization of the brain&#8217;s structures (<xref ref-type="fig" rid="fig1">Figure 1B</xref>). Thus, the same programming interface can be used to access data from any atlas (see code examples in <xref ref-type="fig" rid="fig2">Figure 2</xref>), including recently developed ones (e.g. the enhanced and unified mouse brain atlas, <xref ref-type="bibr" rid="bib6">Chon et al., 2019</xref>).</p>
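<p>As an illustration of this interface, the following minimal sketch queries structure metadata and region meshes directly through the AtlasAPI (the &#8216;allen_mouse_25um&#8217; atlas identifier is one of the supported atlas names; method names follow the bg-atlasapi documentation and may differ slightly between versions):</p><preformat>
# Sketch: accessing atlas data through BrainGlobe's AtlasAPI (bg-atlasapi)
from bg_atlasapi import BrainGlobeAtlas

atlas = BrainGlobeAtlas("allen_mouse_25um")  # downloaded on first use

# Metadata about the hierarchical organization of brain structures
print(atlas.structures["CA1"])               # structure record (name, id, ...)
print(atlas.get_structure_ancestors("CA1"))  # parents in the structure tree

# 3D mesh data for an individual brain region
ca1_mesh = atlas.mesh_from_structure("CA1")
</preformat>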
<fig id="fig1" position="float"><label>Figure 1.</label><caption><title>Design principles.</title><p>(<bold>A</bold>) Schematic illustration of how different types of data can be loaded into brainrender using either brainrender&#8217;s own functions, software packages from the BrainGlobe suite, or custom Python scripts. All data loaded into brainrender are converted to a unified format, which simplifies the process of visualizing data from different sources. (<bold>B</bold>) Using brainrender with different atlases. Visualization of brain atlas data from three different atlases using brainrender. Left, Allen atlas of the mouse brain showing the superficial (SCs) and motor (SCm) subdivisions of the superior colliculus and the Zona Incerta (data from <xref ref-type="bibr" rid="bib32">Wang et al., 2020</xref>). Middle, visualization of the cerebellum and tectum in the larval zebrafish brain (data from <xref ref-type="bibr" rid="bib15">Kunst et al., 2019</xref>). Right, visualization of the precentral gyrus, postcentral gyrus, and temporal lobe of the human brain (data from <xref ref-type="bibr" rid="bib8">Ding et al., 2016</xref>). (<bold>C</bold>) The brainrender GUI. Mouse, human, and zebrafish larvae drawings from <ext-link ext-link-type="uri" xlink:href="https://scidraw.io/">scidraw.io</ext-link> (<ext-link ext-link-type="uri" xlink:href="http://doi.org/10.5281/zenodo.3925991">doi.org/10.5281/zenodo.3925991</ext-link>, <ext-link ext-link-type="uri" xlink:href="http://doi.org/10.5281/zenodo.3926189">doi.org/10.5281/zenodo.3926189</ext-link>, <ext-link ext-link-type="uri" xlink:href="http://doi.org/10.5281/zenodo.3926123">doi.org/10.5281/zenodo.3926123</ext-link>).</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-65751.xml.media/fig1.jpg"/></fig><fig id="fig2" position="float"><label>Figure 2.</label><caption><title>Code examples.</title><p>Example Python code for visualizing brain regions in the mouse and larval zebrafish brains. The same commands can be used for both atlases, and switching between atlases can be done by simply specifying which atlas to use when creating the visualization. Further examples can be found in brainrender&#8217;s GitHub repository.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-65751.xml.media/fig2.jpg"/></fig><p>The second major design principle was to enable rendering of any data type that can be registered to a reference atlas, either from publicly available datasets or from individual laboratories. Brainrender can directly visualize data produced with any analysis software from the BrainGlobe suite, including cellfinder (<xref ref-type="bibr" rid="bib30">Tyson et al., 2020a</xref>) and brainreg (<xref ref-type="bibr" rid="bib31">Tyson and Rousseau, 2020b</xref>). In addition, brainrender provides functionality for easily loading and visualizing commonly used data types, such as .npy files with cell coordinates or image data,&#160;.obj and&#160;.stl files with 3D mesh data, and .json files with streamlines data for mesoscale connectomics. Additional information about the file formats accepted by brainrender can be found in the online documentation. BrainGlobe&#8217;s software suite also includes imio, which can load data from several file types (e.g.&#160;.tiff and .nii), and additional file formats can be loaded through the numerous packages provided by the Python ecosystem. Finally, the existing loading functionality can be easily expanded to support user-specific needs by plugging custom user code directly into the brainrender interface (<xref ref-type="fig" rid="fig1">Figure 1A</xref>).</p><p>One of the goals of brainrender is to facilitate the creation of high-resolution images, animated videos, and interactive online visualizations from any anatomically registered data. Brainrender uses vedo as the rendering engine (<xref ref-type="bibr" rid="bib20">Musy et al., 2019</xref>), a state-of-the-art tool that enables fast, high-quality rendering with minimal hardware requirements.</p><p>High-resolution renderings of rich 3D scenes can be produced rapidly (e.g. 10,000 cells in less than 2&#160;s) on standard laptop or desktop configurations. 
Benchmarking tests across different operating systems and machine configurations show that using a GPU can increase the framerate of interactive renderings by a factor of 3.5 (see <xref ref-type="table" rid="table1">Tables 1</xref> and <xref ref-type="table" rid="table2">2</xref> in Materials&#160;and&#160;methods). This performance increase, however, depends on the complexity of the pre-processing steps, such as data loading and mesh generation, which run on the CPU. As one of the main goals of brainrender is to produce high-resolution visualizations, we have made the rendering quality independent of the hardware configuration, which only affects rendering time. Animated videos and online visualizations can be produced with a few lines of code in brainrender. Several options are provided for easily customizing the appearance of rendered objects, thus enabling high-quality, rich data visualizations that combine multiple data sources.</p><table-wrap id="table1" position="float"><label>Table 1.</label><caption><title>Machine configurations used for benchmark tests.</title></caption><table frame="hsides" rules="groups"><thead><tr><th valign="top">N</th><th valign="top">OS</th><th valign="top">CPU</th><th valign="top">GPU</th></tr></thead><tbody><tr><td valign="top">1</td><td valign="top">macOS Mojave 10.14.6</td><td valign="top">2.3 GHz Intel Core i9</td><td valign="top">Radeon Pro 560X 4 GB GPU</td></tr><tr><td valign="top">2</td><td valign="top">Ubuntu 18.04.2 LTS x86_64</td><td valign="top">Intel i7-8565U @ 4.5 GHz</td><td valign="top">No GPU</td></tr><tr><td valign="top">3</td><td valign="top">Windows 10</td><td valign="top">Intel Core i7-7700HQ 2.8 GHz</td><td valign="top">No GPU</td></tr><tr><td valign="top">4</td><td valign="top">Windows 10</td><td valign="top">Intel Xeon E5-2643 v3 3.4 GHz</td><td valign="top">NVIDIA GeForce GTX 1080 Ti</td></tr></tbody></table></table-wrap><table-wrap id="table2" position="float"><label>Table 2.</label><caption><title>Benchmark tests results.</title><p>The number of actors refers to the total number of elements rendered, and the number of vertices refers to the total number of mesh vertices in the rendering.</p></caption><table frame="hsides" rules="groups"><thead><tr><th valign="top">Test</th><th valign="top">Machine</th><th valign="top">GPU</th><th valign="top"># actors</th><th valign="top"># vertices</th><th valign="top">FPS</th><th valign="top">Run duration (s)</th></tr></thead><tbody><tr><td valign="top">10 k cells</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">1,029,324</td><td valign="top">24.76</td><td valign="top">0.81</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">3</td><td valign="top">1,029,324</td><td valign="top">22.46</td><td valign="top">1.16</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">3</td><td valign="top">1,029,324</td><td valign="top">20.00</td><td valign="top">1.41</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">1,029,324</td><td valign="top">100.00</td><td valign="top">1.34</td></tr><tr><td valign="top">100 k cells</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">9,849,324</td><td valign="top">18.87</td><td valign="top">3.23</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">3</td><td valign="top">9,849,324</td>
valign="top">14.91</td><td valign="top">4.34</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">3</td><td valign="top">9,849,324</td><td valign="top">0.43</td><td valign="top">7.94</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">9,849,324</td><td valign="top">1.20</td><td valign="top">1.13</td></tr><tr><td valign="top">1 M cells</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">98,049,324</td><td valign="top">2.65</td><td valign="top">31.01</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">3</td><td valign="top">98,049,324</td><td valign="top">2.55</td><td valign="top">96.49</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">3</td><td valign="top">98,049,324</td><td valign="top">0.03</td><td valign="top">86.75</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">9,8049,324</td><td valign="top">0.13</td><td valign="top">36.57</td></tr><tr><td valign="top">Slicing 10 k cells</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">237,751</td><td valign="top">37.64</td><td valign="top">0.96</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">3</td><td valign="top">237,751</td><td valign="top">39.10</td><td valign="top">1.25</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">3</td><td valign="top">237,751</td><td valign="top">26.32</td><td valign="top">1.88</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">237,751</td><td valign="top">200.00</td><td valign="top">1.34</td></tr><tr><td valign="top">Slicing 100 k cells</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">276,092</td><td valign="top">31.79</td><td valign="top">7.77</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">3</td><td valign="top">276,092</td><td valign="top">25.98</td><td valign="top">9.09</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">3</td><td valign="top">276,092</td><td valign="top">21.28</td><td valign="top">16.88</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">276,092</td><td valign="top">111.11</td><td valign="top">9.65</td></tr><tr><td valign="top">Slicing 1 M cells</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">275,069</td><td valign="top">11.23</td><td valign="top">91.31</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">3</td><td valign="top">275,069</td><td valign="top">5.39</td><td valign="top">104.79</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">3</td><td valign="top">275,069</td><td valign="top">5.03</td><td valign="top">158.99</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">3</td><td valign="top">275,069</td><td valign="top">37.04</td><td valign="top">97.43</td></tr><tr><td valign="top">Brain regions</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">1678</td><td valign="top">1,864,388</td><td 
valign="top">9.38</td><td valign="top">11.78</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">1678</td><td valign="top">1,864,388</td><td valign="top">7.61</td><td valign="top">27.40</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">1678</td><td valign="top">1,864,388</td><td valign="top">6.49</td><td valign="top">46.79</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">1678</td><td valign="top">1,864,388</td><td valign="top">11.90</td><td valign="top">35.83</td></tr><tr><td valign="top">Animation</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">8</td><td valign="top">96,615</td><td valign="top">9.91</td><td valign="top">18.98</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">8</td><td valign="top">96,615</td><td valign="top">22.12</td><td valign="top">12.63</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">8</td><td valign="top">96,615</td><td valign="top">15.15</td><td valign="top">11.92</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">8</td><td valign="top">96,615</td><td valign="top">47.62</td><td valign="top">12.29</td></tr><tr><td valign="top">Volume</td><td valign="top">1</td><td valign="top">Yes</td><td valign="top">12</td><td valign="top">49,324</td><td valign="top">1.79</td><td valign="top">2.31</td></tr><tr><td valign="top"/><td valign="top">2</td><td valign="top">No</td><td valign="top">12</td><td valign="top">49,324</td><td valign="top">1.66</td><td valign="top">1.95</td></tr><tr><td valign="top"/><td valign="top">3</td><td valign="top">No</td><td valign="top">12</td><td valign="top">49,324</td><td valign="top">3.55</td><td valign="top">2.15</td></tr><tr><td valign="top"/><td valign="top">4</td><td valign="top">Yes</td><td valign="top">12</td><td valign="top">49,324</td><td valign="top">23.26</td><td valign="top">1.21</td></tr></tbody></table></table-wrap><p>Finally, we aimed for brainrender to empower scientists with little or no programming experience to generate advanced visualizations of their anatomically registered data. To make brainrender as user-friendly as possible we have produced extensive documentation, tutorials and examples for installing and using the software. We have also developed a graphic user interface (GUI) to access most of brainrender&#8217;s core functionality. This GUI can be used to perform actions such as rendering of brain regions and labeled cells (e.g. from cellfinder) and creating images of the rendered data, without writing custom python code (<xref ref-type="fig" rid="fig1">Figure 1C</xref>), (<xref ref-type="video" rid="video1">Video 1</xref>).</p><media id="video1" mime-subtype="mp4" mimetype="video" xlink:href="elife-65751-video1.mp4"><label>Video 1.</label><caption><title>Example brainrender GUI usage.</title><p>Short demonstration of how brainrender's GUI can be used to interactively visualize brain regions, labeled cells, and custom meshes.</p></caption></media></sec><sec id="s2-2"><title>Visualizing brain regions and other structures</title><p>A key element of any neuroanatomical visualization is the rendering of the entire outline of the brain as well as the borders of brain regions of interest. In brainrender, this can easily be achieved by specifying which brain regions to include in the rendering. 
<p>Brainrender can also render brain areas defined by factors other than anatomical location, such as gene expression levels or functional properties. These can be loaded either directly as 3D mesh data after processing with dedicated software (e.g., <xref ref-type="bibr" rid="bib30">Tyson et al., 2020a</xref>; <xref ref-type="bibr" rid="bib27">Song et al., 2020</xref>; <xref ref-type="bibr" rid="bib13">Jin et al., 2019</xref>; <xref ref-type="fig" rid="fig3">Figure 3A</xref>) or as 3D volumetric data (<xref ref-type="fig" rid="fig3">Figure 3E</xref>). For the latter, brainrender takes care of the conversion of voxels into a 3D mesh for rendering. Furthermore, custom 3D meshes can be created to visualize different types of data. For example, brainrender can import JSON files with tractography connectivity data and create &#8216;streamlines&#8217; to visualize efferent projections from a brain region of interest (<xref ref-type="fig" rid="fig3">Figure 3B</xref>).</p>
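<p>A minimal sketch of the volumetric route using the Volume actor (the file name and parameter values are hypothetical, and the exact Volume arguments may differ between versions):</p><preformat>
# Sketch: render 3D volumetric (voxel) data; voxels are converted to a mesh
import numpy as np
from brainrender import Scene
from brainrender.actors import Volume

scene = Scene(atlas_name="allen_mouse_25um")

voxels = np.load("gene_expression.npy")  # hypothetical 3D array, atlas-registered
scene.add(Volume(voxels, as_surface=True, cmap="Reds"))

scene.render()
</preformat>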
<fig id="fig3" position="float"><label>Figure 3.</label><caption><title>Visualizing different types of data in brainrender.</title><p>(<bold>A</bold>) Spread of fluorescence labeling following viral injection of AAV2-CRE-eGFP in the superior colliculus of two FLEX-TdTomato mice. 3D objects showing the injection sites were created using custom Python scripts following acquisition of a 3D image of the entire brain with serial two-photon tomography and registration of the image data to the atlas&#8217; template (with brainreg, <xref ref-type="bibr" rid="bib30">Tyson et al., 2020a</xref>). (<bold>B</bold>) Streamlines visualization of efferent projections from the mouse primary motor cortex following injection of an anterogradely transported virus expressing fluorescent proteins (original data from <xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>, downloaded from Neuroinformatics NL with brainrender). (<bold>C</bold>) Visualization of the location of several implanted Neuropixels probes from multiple mice (data from <xref ref-type="bibr" rid="bib28">Steinmetz et al., 2019</xref>). Dark salmon-colored tracks show probes going through both primary/anterior visual cortex (VISp/VISa) and the dorsal lateral geniculate nucleus of the thalamus. (<bold>D</bold>) Single periaqueductal gray (PAG) neuron. The PAG and superior colliculus are also shown. The neuron&#8217;s morphology was reconstructed by targeting the expression of fluorescent proteins in excitatory neurons in the PAG via an intersectional viral strategy, followed by imaging of cleared tissue and manual reconstruction of the neuron&#8217;s morphology with Vaa3D software. Data were registered to the Allen atlas with SHARPTRACK (<xref ref-type="bibr" rid="bib24">Shamash et al., 2018</xref>). The 3D data were saved as a&#160;.stl file and loaded directly into brainrender. (<bold>E</bold>) Gene expression data. Left, expression of genes &#8216;brn3c&#8217; and &#8216;nk1688CGt&#8217; in the tectum of the larval zebrafish brain (gene expression data from fishatlas.neuro.mpg.de, 3D objects created with custom Python scripts). Right, expression of gene &#8216;Gpr161&#8217; in the mouse hippocampus (gene expression data from <xref ref-type="bibr" rid="bib32">Wang et al., 2020</xref>, downloaded with brainrender; 3D objects created with brainrender). Colored voxels indicate high gene expression. The CA1 field of the hippocampus is also shown.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-65751.xml.media/fig3.jpg"/></fig><p>Brainrender also simplifies visualizing the location of devices implanted in the brain for neural activity recordings or manipulations, such as electrodes or optical fibers. Post&#160;hoc histological images taken to confirm the correct placement of the device can be registered to a reference atlas using appropriate software, and the registered data can be imported into brainrender (<xref ref-type="fig" rid="fig3">Figure 3C</xref>). This type of visualization greatly facilitates cross-animal comparisons and helps data interpretation within and across research groups.</p><p>Finally, brainrender can be used to visualize any object saved in the most commonly used file formats for 3D design (e.g. .obj,&#160;.stl), thus ensuring that brainrender can flexibly adapt to the visualization needs of the user (<xref ref-type="fig" rid="fig3">Figure 3D</xref>).</p></sec><sec id="s2-3"><title>Individual neurons and mesoscale connectomics</title><p>Recent advances in large field of view and whole-brain imaging allow the generation of brain-wide data at single-neuron resolution. Having a platform for visualizing these datasets with ease is critical for exploratory data analyses. Several open-source software packages are available for registering large amounts of such imaging data and automatically identifying labeled cells (e.g. cells expressing fluorescent proteins) (<xref ref-type="bibr" rid="bib30">Tyson et al., 2020a</xref>; <xref ref-type="bibr" rid="bib9">F&#252;rth et al., 2018</xref>; <xref ref-type="bibr" rid="bib10">Goubran et al., 2019</xref>; <xref ref-type="bibr" rid="bib23">Renier et al., 2016</xref>). This processing step outputs a table of coordinates for a set of labeled cells, which can be directly imported into brainrender to visualize a wealth of anatomical data at cellular resolution (<xref ref-type="fig" rid="fig4">Figure 4A</xref>).</p>
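<p>A sketch of this step with brainrender&#8217;s Points actor (the file name is hypothetical; cell coordinates are expected as an N&#160;&#215;&#160;3 array registered to the atlas space):</p><preformat>
# Sketch: visualize labeled cell coordinates with the Points actor
import numpy as np
from brainrender import Scene
from brainrender.actors import Points

scene = Scene(atlas_name="allen_mouse_25um")

cells = np.load("labeled_cells.npy")  # hypothetical (N, 3) coordinates array
scene.add(Points(cells, radius=20, colors="steelblue"))

scene.slice("sagittal")  # optionally slice the rendering to expose the data
scene.render()
</preformat>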
<fig id="fig4" position="float"><label>Figure 4.</label><caption><title>Visualizing cell location and morphological data.</title><p>(<bold>A</bold>) Visualizing the location of labeled cells. Left, visualization of fluorescently labeled cells identified using cellfinder (data from <xref ref-type="bibr" rid="bib31">Tyson and Rousseau, 2020b</xref>). Right, visualization of functionally defined clusters of regions of interest in the brain of a zebrafish larva during a visuomotor task (data from <xref ref-type="bibr" rid="bib17">Markov et al., 2020</xref>). (<bold>B</bold>) Visualizing neuronal morphology data. Left, three secondary motor cortex neurons projecting to the thalamus (data from <xref ref-type="bibr" rid="bib33">Winnubst et al., 2019</xref>, downloaded with morphapi from <ext-link ext-link-type="uri" xlink:href="http://neuromorpho.org/">neuromorpho.org</ext-link>, <xref ref-type="bibr" rid="bib4">Ascoli et al., 2007</xref>). Right, morphology of cerebellar neurons in larval zebrafish (data from <xref ref-type="bibr" rid="bib15">Kunst et al., 2019</xref>, downloaded with morphapi). In the left panels of (<bold>A</bold>) and (<bold>B</bold>), the brain outline was sliced along the midline to expose the data.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-65751.xml.media/fig4.jpg"/></fig><p>Beyond the location of cell bodies, visualizing the entire dendritic and axonal arbors of single neurons registered to a reference atlas is important for understanding the distribution of neuronal signals across the brain. Single-cell morphologies are often complex 3D structures and therefore poorly represented in 2D images. Generating 3D interactive renderings is thus important to facilitate the exploration of this type of data. Brainrender can be used to parse and render&#160;.swc files containing morphological data, and it is fully integrated with morphapi, a package for downloading morphological data from publicly available datasets (e.g. from <ext-link ext-link-type="uri" xlink:href="http://neuromorpho.org/">neuromorpho.org</ext-link>, <xref ref-type="bibr" rid="bib4">Ascoli et al., 2007</xref>; <xref ref-type="fig" rid="fig4">Figure 4B</xref>).</p></sec><sec id="s2-4"><title>Producing figures, videos, and interactive visualizations with brainrender</title><p>A core goal of brainrender is to facilitate the production of high-quality images, videos, and interactive visualizations of anatomical data. Brainrender leverages the functionality provided by vedo (<xref ref-type="bibr" rid="bib20">Musy et al., 2019</xref>) to create images directly from the rendered scene. Renderings can also be exported to HTML files to create interactive visualizations that can be hosted online. Finally, functionality is provided to easily export videos from rendered scenes. Animated videos can be created by specifying parameters (e.g. the position of the camera or the transparency of a mesh) at selected keyframes. Brainrender then creates a video by animating the rendering between the keyframes. This approach facilitates the creation of videos while retaining the flexibility necessary to produce richly animated sequences (<xref ref-type="video" rid="video2">Videos 2</xref>&#8211;<xref ref-type="video" rid="video5">5</xref>). All example figures and videos in this article were generated directly in brainrender, with no further editing.</p>
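<p>A minimal sketch of the keyframe interface (camera names and parameters follow the examples in the GitHub repository and may differ between versions):</p><preformat>
# Sketch: create an animated video by interpolating between keyframes
from brainrender import Scene, Animation

scene = Scene(atlas_name="allen_mouse_25um")
scene.add_brain_region("TH", alpha=0.4)

anim = Animation(scene, "./videos", "thalamus_spin")

# Camera position and zoom at selected keyframes; frames in between
# are interpolated automatically
anim.add_keyframe(0, camera="top", zoom=1.0)
anim.add_keyframe(1.5, camera="sagittal", zoom=0.9)
anim.add_keyframe(3, camera="frontal", zoom=1.2)

anim.make_video(duration=3, fps=15)
</preformat>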
<media id="video2" mime-subtype="mp4" mimetype="video" xlink:href="elife-65751-video2.mp4"><label>Video 2.</label><caption><title>Animated video created with brainrender.</title><p>Visualization of neuronal morphologies for two layer 5b pyramidal neurons in the secondary motor area of the mouse brain (<xref ref-type="bibr" rid="bib33">Winnubst et al., 2019</xref>, downloaded with morphapi from <ext-link ext-link-type="uri" xlink:href="http://neuromorpho.org/">neuromorpho.org</ext-link>). The secondary motor area and thalamus are also shown.</p></caption></media><media id="video3" mime-subtype="mp4" mimetype="video" xlink:href="elife-65751-video3.mp4"><label>Video 3.</label><caption><title>Animated video created with brainrender.</title><p>Frontal view of all brain regions in the Allen Mouse Brain atlas as the brain is progressively 'sliced' in the rostro-caudal direction.</p></caption></media><media id="video4" mime-subtype="mp4" mimetype="video" xlink:href="elife-65751-video4.mp4"><label>Video 4.</label><caption><title>Animated video created with brainrender.</title><p>Visualization of the location of three implanted Neuropixels probes from multiple mice (data from <xref ref-type="bibr" rid="bib28">Steinmetz et al., 2019</xref>). Every 0.5 s, a subset of the probes&#8217;&#160;electrodes that detected a neuron's action potential is shown in salmon to visualize neuronal activity.</p></caption></media><media id="video5" mime-subtype="mp4" mimetype="video" xlink:href="elife-65751-video5.mp4"><label>Video 5.</label><caption><title>Animated video created with brainrender showing the location of cells labeled by targeted expression of a fluorescent protein identified with cellfinder (data from <xref ref-type="bibr" rid="bib30">Tyson et al., 2020a</xref>).</title><p>In dark blue: streamline visualization of efferent projections from the retrosplenial cortex following injection of an anterogradely transported virus expressing fluorescent proteins (data from <xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>).</p></caption></media></sec></sec><sec id="s3" sec-type="discussion"><title>Discussion</title><p>In this article, we have presented brainrender, a Python package for creating 3D&#160;renderings of anatomically registered data.</p><p>Brainrender addresses the current lack of Python-based and user-friendly tools for rendering anatomical data. As part of BrainGlobe&#8217;s suite of software tools for the analysis of anatomical data, brainrender facilitates the development of integrated analysis pipelines and the reusability of software tools across model species, minimizing the need for additional software development. Finally, brainrender promises to improve how anatomically registered data are disseminated both in scientific publications and in other media (e.g., hosted online).</p><sec id="s3-1"><title>Limitations and future directions</title><p>With brainrender, we aimed to make the rendering process as simple as possible. Nevertheless, some technically demanding pre-processing steps are necessary before raw image data can be visualized in brainrender. In particular, a critical step for visualizing anatomical data is the registration to a reference template (e.g., one of the atlases provided by the AtlasAPI). While this step can be challenging and time-consuming, the BrainGlobe suite provides software to facilitate this process (e.g., brainreg and bg-space), and alternative software tools have been developed for this purpose (e.g., <xref ref-type="bibr" rid="bib27">Song et al., 2020</xref>; <xref ref-type="bibr" rid="bib13">Jin et al., 2019</xref>). Additional information about data registration can be found in BrainGlobe&#8217;s and brainrender&#8217;s online documentation, as well as in the examples in brainrender&#8217;s GitHub repository. A related challenge is integrating new anatomical atlases into the AtlasAPI. While we anticipate that most users will not have this need, it is a non-trivial task that requires considerable programming skills. We believe that BrainGlobe&#8217;s AtlasAPI greatly facilitates this process, which is presented in <xref ref-type="bibr" rid="bib7">Claudi et al., 2020</xref> and has extensive online documentation.</p><p>Brainrender has been optimized for rendering quality instead of rendering performance. Other commonly used software tools like napari (<xref ref-type="bibr" rid="bib26">Sofroniew and Lambert, 2020</xref>) and ImageJ are dedicated to visualizing N-dimensional image data and perform very well even on large datasets. When comparing brainrender with other software, it is important to note that brainrender is intended to work primarily with mesh data and not 3D&#160;image data. 
Although it can display image data (e.g., with the Volume actor), this functionality is not as fully developed as that for mesh data. A direct benchmarking comparison between brainrender and napari shows that brainrender is 5&#215; slower than napari at visualizing image data, but 20&#215; faster at visualizing mesh data. In both cases, however, brainrender achieves superior rendering quality. Other software packages dedicated to high-performance rendering, such as Blender, can handle mesh data with a performance that surpasses brainrender. Their use, however, comes with the large overhead of learning a very complex program to generate what will most often be simple renderings. It also requires that the users themselves take care of downloading, storing, and accessing mesh data from the anatomical atlases. Nevertheless, the rendering performance of brainrender could be a target for improvement in future versions, both for image and for&#160;mesh data, through optimizing the Actor classes. While we have designed brainrender usage to require minimal programming expertise, installing Python and brainrender may still prove challenging for some users. In the future, we aim to make brainrender a stand-alone application that can be simply downloaded and locally installed, either through Docker containers or through&#160;executable files. Further possible improvements include the development of plug-ins for loading data from file formats other than those already supported, and improvements to the GUI functionality. Moreover, in addition to images and videos, brainrender can be used to export renderings as HTML files and generate online 3D interactive renderings. Currently, however, embedding renderings into a web page remains far from trivial. Further developments on this front should make it possible to easily host interactive renderings online, therefore improving how anatomically registered data are disseminated both in scientific publications and in&#160;other media. While we plan to continue developing brainrender in the future, we welcome contributions from the community. Users should feel encouraged to contribute irrespective of their programming experience, and we note that the programming ability of many biologists is often better than they perceive it to be. We especially welcome contributions aimed at improving the user experience of brainrender, at any level of interaction. 
Contributions can involve active development of brainrender&#8217;s code base, but they can also be bug reports, feature requests, improvements to the online documentation, and help answering users&#8217; questions.</p></sec></sec><sec id="s4" sec-type="materials|methods"><title>Materials and methods</title><table-wrap id="keyresource" position="anchor"><label>Key resources table</label><table frame="hsides" rules="groups"><thead><tr><th valign="top">Reagent type (species) or resource</th><th valign="top">Designation</th><th valign="top">Source or reference</th><th valign="top">Identifiers</th><th valign="top">Additional information</th></tr></thead><tbody><tr><td valign="top">Software, algorithm</td><td valign="top">NumPy</td><td valign="top"><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41586-020-2649-2">https://doi.org/10.1038/s41586-020-2649-2</ext-link></td><td valign="top">RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/RRID:SCR_008633">SCR_008633</ext-link></td><td valign="top"/></tr><tr><td valign="top">Software, algorithm</td><td valign="top">VTK</td><td valign="top"><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.softx.2015.04.001">https://doi.org/10.1016/j.softx.2015.04.001</ext-link></td><td valign="top">RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/RRID:SCR_015013">SCR_015013</ext-link></td><td valign="top"/></tr><tr><td valign="top">Software, algorithm</td><td valign="top">vedo</td><td valign="top"><ext-link ext-link-type="uri" xlink:href="https://zenodo.org/record/4287635">https://zenodo.org/record/4287635</ext-link></td><td valign="top"/><td valign="top"/></tr><tr><td valign="top">Software, algorithm</td><td valign="top">BrainGlobe Atlas API</td><td valign="top"><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.21105/joss.02668">https://doi.org/10.21105/joss.02668</ext-link></td><td valign="top"/><td valign="top"/></tr><tr><td valign="top">Software, algorithm</td><td valign="top">pandas</td><td valign="top"><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5281/zenodo.3509134">https://doi.org/10.5281/zenodo.3509134</ext-link></td><td valign="top"/><td valign="top"/></tr><tr><td valign="top">Software, algorithm</td><td valign="top">Matplotlib</td><td valign="top">doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/MCSE.2007.55">10.1109/MCSE.2007.55</ext-link></td><td valign="top">RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/RRID:SCR_008624">SCR_008624</ext-link></td><td valign="top"/></tr><tr><td valign="top">Software, algorithm</td><td valign="top">Jupyter</td><td valign="top">doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3233/978-1-61499-649-1-87">10.3233/978-1-61499-649-1-87</ext-link></td><td valign="top">RRID:<ext-link ext-link-type="uri" xlink:href="https://scicrunch.org/resolver/RRID:SCR_018416">SCR_018416</ext-link></td><td valign="top"/></tr></tbody></table></table-wrap><sec id="s4-1"><title>Brainrender&#8217;s workflow</title><p>Brainrender is written in Python 3 and depends on standard Python packages such as numpy, matplotlib, and pandas (<xref ref-type="bibr" rid="bib11">Harris et al., 2020</xref>; <xref ref-type="bibr" rid="bib12">Hunter, 2007</xref>; <xref ref-type="bibr" rid="bib29">The pandas development team, 2020</xref>) and on vedo (<xref ref-type="bibr" rid="bib20">Musy et al., 2019</xref>) and BrainGlobe&#8217;s AtlasAPI (<xref ref-type="bibr" rid="bib7">Claudi et al., 2020</xref>). Extensive documentation on how to install and use brainrender can be found at docs.brainrender.info, and we provide here only a brief overview of the brainrender workflow. The GitHub repository also contains detailed examples of Python scripts and Jupyter notebooks (<xref ref-type="bibr" rid="bib14">Kluyver et al., 2016</xref>). All of brainrender&#8217;s code is open-source and has been deposited in full in the GitHub repository and at PyPI (a repository of Python software) under a permissive BSD 3-Clause license. We welcome any user to download and inspect the source code, modify it as needed, or contribute to brainrender&#8217;s development directly.</p><p>Brainrender can be installed in any Python environment using Python version&#160;&#8805; 3.6.0. We recommend the creation of an anaconda or virtual environment with an appropriate Python version for use with brainrender. Installing brainrender is then as simple as &#8216;pip install brainrender&#8217;, although additional optional packages might have to be installed separately (e.g., to access data from the Allen Institute).</p><p>The central element of any visualization produced by brainrender is the Scene. A Scene controls which elements (Actors) are visualized and coordinates the rendering, the position of the camera&#8217;s point of view, the generation of screenshots and animations from the rendered scene, and other important actions.</p><p>Actors can be added to the scene in several ways. When loading data directly from a file with 3D mesh information (e.g. .obj), an Actor is generated automatically to represent the mesh in the rendering. When rendering data from other sources (e.g. from a .swc file with neuronal morphology or from a table of coordinates of labeled cells), dedicated functions in brainrender parse the input data and generate the corresponding Actors. Actors in brainrender have properties, such as color and transparency, that can be used to specify the appearance of a rendered actor according to the user&#8217;s aesthetic preferences. Brainrender&#8217;s Scene and Actor functionality uses vedo as the rendering engine (GitHub repository; <xref ref-type="bibr" rid="bib20">Musy et al., 2019</xref>).</p><p>In addition to data loaded from external files, brainrender can directly load atlas data containing, for example, the 3D meshes of individual brain regions. This is done via BrainGlobe&#8217;s AtlasAPI to allow the same programming interface in brainrender to visualize data from any atlas supported by the AtlasAPI. Brainrender also provides additional functionality to interface with data available from projects that are part of the Allen Institute Mouse Atlas and Mouse Connectome projects (<xref ref-type="bibr" rid="bib32">Wang et al., 2020</xref>; <xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>). These projects provide an SDK (Software Development Kit) to directly download data from their database, and brainrender provides a simple interface for downloading gene&#160;expression and connectomics (streamlines) data. All atlas and connectomics data downloaded by brainrender can be loaded directly into a Scene as Actors.</p>
<p>Morphological data, such as reconstructions of individual neurons, can be visualized by loading this type of data directly from .swc files or by downloading it in Python using morphapi &#8211; software from the BrainGlobe suite that provides a simple and unified interface to several databases of neuron morphologies (e.g., <ext-link ext-link-type="uri" xlink:href="http://neuromorpho.org/">neuromorpho.org</ext-link>, <xref ref-type="bibr" rid="bib4">Ascoli et al., 2007</xref>). Data downloaded with morphapi can be loaded directly into a brainrender scene for visualization.</p></sec><sec id="s4-2"><title>Example code</title><p>As a demonstration of how easily renderings can be created in brainrender, the Python code (<xref ref-type="fig" rid="fig5">Figure 5</xref>) illustrates how to create a Scene and add Actors by loading 3D data from an .obj file and then adding brain regions to the visualization. Brainrender&#8217;s GitHub repository provides several simple and concise examples of how to use brainrender to load user and atlas data, edit rendered meshes (e.g., to change their color or cut them with a plane), save screenshots of rendered scenes, and&#160;create animated videos.</p><fig id="fig5" position="float"><label>Figure 5.</label><caption><title>Code examples.</title><p>(<bold>A</bold>) Example code to visualize a set of labeled cell coordinates using the Points actor class. (<bold>B</bold>) Code example illustrating how to override brainrender&#8217;s default settings and how to use custom camera settings. (<bold>C</bold>) Code example showing how custom mesh objects saved as .obj and .stl files can be visualized in brainrender. (<bold>D</bold>) Example usage of brainrender&#8217;s Animation class to create custom animations. Further examples can be found in brainrender&#8217;s GitHub repository.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-65751.xml.media/fig5.jpg"/></fig><p>While brainrender is intended to be mainly a visualization tool, simple analyses can be carried out directly by leveraging functionality from either vedo or BrainGlobe&#8217;s AtlasAPI. For example, vedo can access properties of actors added to a brainrender scene, which could be used to measure the distance between two actors or to check if two actors&#8217; meshes intersect (<xref ref-type="fig" rid="fig6">Figure 6A</xref>). Similarly, BrainGlobe&#8217;s AtlasAPI provides methods to, for example, check whether a point (defined by a set of coordinates) is contained in a brain region of interest or to retrieve brain regions that are above or below a brain region of interest in the atlas hierarchy (<xref ref-type="fig" rid="fig6">Figure 6B</xref>).</p>
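<p>As an illustrative sketch of such an analysis (the region acronyms are placeholders and the method names follow vedo and BrainGlobe AtlasAPI conventions; the complete worked examples are those shown in <xref ref-type="fig" rid="fig6">Figure 6</xref> and in the GitHub repository), one can measure the distance between the centers of two actors and query which brain region contains a given coordinate:</p><code language="python">import numpy as np
from brainrender import Scene

scene = Scene(atlas_name="allen_mouse_25um")
sc = scene.add_brain_region("SCm")
th = scene.add_brain_region("TH")

# Each brainrender Actor wraps a vedo mesh whose properties can be accessed
# directly, e.g. to measure the distance between two actors' centers of mass
distance = np.linalg.norm(sc.mesh.centerOfMass() - th.mesh.centerOfMass())

# BrainGlobe's AtlasAPI maps a coordinate (in microns) to the brain region
# that contains it
point = th.mesh.centerOfMass()
region = scene.atlas.structure_from_coords(point, microns=True, as_acronym=True)</code>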
<fig id="fig6" position="float"><label>Figure 6.</label><caption><title>Advanced code examples.</title><p>(<bold>A</bold>) Example code to measure the distance between actors and to check whether a given actor is contained in a target brain region. Left: virus injection volumes (red and gray) reconstructed from virus injections targeted at the superior colliculus (magenta). Gray-colored injection volumes show data from the Allen Mouse Connectome (<xref ref-type="bibr" rid="bib21">Oh et al., 2014</xref>). Right: example code to measure the distance between the centers of two brainrender actors and to check if an actor&#8217;s center is contained in a brain region of interest. (<bold>B</bold>) Code example illustrating how to check whether a point (e.g., representing a labeled cell) is in a brain region of interest. Left: visualization of reconstructed probe positions from several individual animals, data from <xref ref-type="bibr" rid="bib28">Steinmetz et al., 2019</xref>. Probe channels located in the thalamus (red) are highlighted. Right: example code showing how to use BrainGlobe&#8217;s AtlasAPI to verify whether&#160;a point (here representing a probe channel) is contained in a brain region of interest or any of its substructures. Further examples can be found in brainrender&#8217;s GitHub repository.</p></caption><graphic mime-subtype="jpeg" mimetype="image" xlink:href="elife-65751.xml.media/fig6.jpg"/></fig><p>The code and data used to generate the figures and videos in this article are&#160;freely available at brainrender&#8217;s GitHub repository, which provides examples of more advanced usage of brainrender&#8217;s functionality.</p></sec><sec id="s4-3"><title>Benchmark tests</title><p>We designed a series of benchmark tests aimed at evaluating brainrender&#8217;s performance with different combinations of hardware and operating system. We used five tests designed to cover most aspects of brainrender&#8217;s functionality:</p><list list-type="bullet"><list-item><p>rendering large numbers (10<sup>4</sup>, 10<sup>6</sup>, 10<sup>7</sup>) of cells using the Points actor.</p></list-item><list-item><p>using a plane to &#8216;slice&#8217; the same numbers of cells (using the Scene.slice method).</p></list-item><list-item><p>rendering more than 1000 individual meshes representing brain regions from the Allen Institute&#8217;s mouse brain atlas.</p></list-item><list-item><p>making a short (3 s, 10 fps) animation of a spinning brain with several brain regions&#8217; meshes displayed.</p></list-item><list-item><p>rendering (10 times) a 3D image representing the voxel-wise expression levels of the gene Gpr161 in the mouse brain (data from the Allen Institute).</p></list-item></list><p>For each test, we estimated the time necessary to complete the test script as well as the frame rate of the interactive rendering. Four machines were used for benchmark tests (see <xref ref-type="table" rid="table1">Table 1</xref>). The results of the benchmark tests (see Key resource table) illustrate that although a GPU improves performance, in the absence of a dedicated GPU brainrender can handle rich interactive visualizations (for most use cases, the number of rendered mesh vertices is much lower than that used in the tests). A simplified sketch of the Points rendering test is shown below.</p>
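<p>To give a flavor of these tests, the following snippet is a simplified sketch of the Points test rather than the verbatim benchmark script (the benchmark code itself is available in the GitHub repository): it times the creation and rendering of a scene containing one million randomly placed cells.</p><code language="python">import time

import numpy as np
from brainrender import Scene
from brainrender.actors import Points

# Generate 10**6 random coordinates (in microns) standing in for labeled cells
coordinates = np.random.uniform(1000, 9000, size=(10**6, 3))

start = time.time()
scene = Scene()
scene.add(Points(coordinates, radius=20))
scene.render(interactive=False)  # build the rendering without blocking in the interactive loop
print(f"Rendered 10**6 cells in {time.time() - start:.2f} s")</code>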
</sec></sec></body><back><ack id="ack"><title>Acknowledgements</title><p>We thank Yu Lin Tan for sharing the single neuron morphology shown in 3D. The illustrations of a human, mouse, and zebrafish used in <xref ref-type="fig" rid="fig1">Figures 1</xref>, <xref ref-type="fig" rid="fig2">2,</xref> and <xref ref-type="fig" rid="fig3">3</xref> were obtained from <ext-link ext-link-type="uri" xlink:href="https://scidraw.io/">scidraw.io</ext-link>.</p></ack><sec id="s5" sec-type="additional-information"><title>Additional information</title><fn-group content-type="competing-interest"><title>Competing interests</title><fn fn-type="COI-statement" id="conf1"><p>No competing interests declared</p></fn></fn-group><fn-group content-type="author-contribution"><title>Author contributions</title><fn fn-type="con" id="con1"><p>Conceptualization, Resources, Software, Validation, Visualization, Methodology, Writing - original draft, Project administration, Writing - review and editing</p></fn><fn fn-type="con" id="con2"><p>Conceptualization, Resources, Software, Writing - original draft</p></fn><fn fn-type="con" id="con3"><p>Conceptualization, Resources, Software, Writing - original draft</p></fn><fn fn-type="con" id="con4"><p>Supervision, Project administration</p></fn><fn fn-type="con" id="con5"><p>Supervision, Project administration</p></fn><fn fn-type="con" id="con6"><p>Supervision, Funding acquisition, Writing - original draft, Project administration, Writing - review and editing</p></fn></fn-group></sec><sec id="s6" sec-type="supplementary-material"><title>Additional files</title><supplementary-material id="transrepform"><label>Transparent reporting form</label><media mime-subtype="docx" mimetype="application" xlink:href="elife-65751-transrepform-v3.docx"/></supplementary-material></sec><sec id="s7" sec-type="data-availability"><title>Data availability</title><p>All code has been deposited on GitHub and is freely accessible (<ext-link ext-link-type="uri" xlink:href="https://github.com/brainglobe/brainrender">https://github.com/brainglobe/brainrender</ext-link>).</p></sec><ref-list><title>References</title><ref id="bib1"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Adkins</surname> <given-names>RS</given-names></name><name><surname>Aldridge</surname> <given-names>AI</given-names></name><name><surname>Allen</surname> <given-names>S</given-names></name><name><surname>Ament</surname> <given-names>SA</given-names></name><name><surname>An</surname> <given-names>X</given-names></name><name><surname>Armand</surname> <given-names>E</given-names></name><collab>BRAIN Initiative Cell Census Network (BICCN)</collab></person-group><year iso-8601-date="2020">2020</year><article-title><italic>A multimodal cell census and atlas of the mammalian primary motor cortex</italic></article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2020.10.19.343129</pub-id></element-citation></ref><ref id="bib2"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arganda-Carreras</surname> <given-names>I</given-names></name><name><surname>Manoliu</surname> <given-names>T</given-names></name><name><surname>Mazuras</surname> <given-names>N</given-names></name><name><surname>Schulze</surname> <given-names>F</given-names></name><name><surname>Iglesias</surname> <given-names>JE</given-names></name><name><surname>B&#252;hler</surname> <given-names>K</given-names></name><name><surname>Jenett</surname> <given-names>A</given-names></name><name><surname>Rouyer</surname> <given-names>F</given-names></name><name><surname>Andrey</surname>
<given-names>P</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>A statistically representative Atlas for mapping neuronal circuits in the <italic>Drosophila</italic> Adult Brain</article-title><source>Frontiers in Neuroinformatics</source><volume>12</volume><elocation-id>13</elocation-id><pub-id pub-id-type="doi">10.3389/fninf.2018.00013</pub-id><pub-id pub-id-type="pmid">29628885</pub-id></element-citation></ref><ref id="bib3"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Arshadi</surname> <given-names>C</given-names></name><name><surname>Eddison</surname> <given-names>M</given-names></name><name><surname>Gunther</surname> <given-names>UA</given-names></name><name><surname>Harrington</surname> <given-names>KI</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>SNT: a unifying toolbox for quantification of neuronal anatomy</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2020.07.13.179325</pub-id></element-citation></ref><ref id="bib4"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ascoli</surname> <given-names>GA</given-names></name><name><surname>Donohue</surname> <given-names>DE</given-names></name><name><surname>Halavi</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>NeuroMorpho.Org: a central resource for neuronal morphologies</article-title><source>Journal of Neuroscience</source><volume>27</volume><fpage>9247</fpage><lpage>9251</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.2055-07.2007</pub-id><pub-id pub-id-type="pmid">17728438</pub-id></element-citation></ref><ref id="bib5"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bates</surname> <given-names>AS</given-names></name><name><surname>Manton</surname> <given-names>JD</given-names></name><name><surname>Jagannathan</surname> <given-names>SR</given-names></name><name><surname>Costa</surname> <given-names>M</given-names></name><name><surname>Schlegel</surname> <given-names>P</given-names></name><name><surname>Rohlfing</surname> <given-names>T</given-names></name><name><surname>Jefferis</surname> <given-names>GS</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>The Natverse, a versatile toolbox for combining and analysing neuroanatomical data</article-title><source>eLife</source><volume>9</volume><elocation-id>e53350</elocation-id><pub-id pub-id-type="doi">10.7554/eLife.53350</pub-id><pub-id pub-id-type="pmid">32286229</pub-id></element-citation></ref><ref id="bib6"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chon</surname> <given-names>U</given-names></name><name><surname>Vanselow</surname> <given-names>DJ</given-names></name><name><surname>Cheng</surname> <given-names>KC</given-names></name><name><surname>Kim</surname> <given-names>Y</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Enhanced and unified anatomical labeling for a common mouse brain atlas</article-title><source>Nature Communications</source><volume>10</volume><elocation-id>5067</elocation-id><pub-id pub-id-type="doi">10.1038/s41467-019-13057-w</pub-id><pub-id pub-id-type="pmid">31699990</pub-id></element-citation></ref><ref id="bib7"><element-citation publication-type="journal"><person-group 
person-group-type="author"><name><surname>Claudi</surname> <given-names>F</given-names></name><name><surname>Petrucco</surname> <given-names>L</given-names></name><name><surname>Tyson</surname> <given-names>A</given-names></name><name><surname>Branco</surname> <given-names>T</given-names></name><name><surname>Margrie</surname> <given-names>T</given-names></name><name><surname>Portugues</surname> <given-names>R</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>BrainGlobe atlas API: a common interface for neuroanatomical atlases</article-title><source>Journal of Open Source Software</source><volume>5</volume><elocation-id>2668</elocation-id><pub-id pub-id-type="doi">10.21105/joss.02668</pub-id></element-citation></ref><ref id="bib8"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>S&#8208;L</given-names></name><name><surname>Royall</surname> <given-names>JJ</given-names></name><name><surname>Sunkin</surname> <given-names>SM</given-names></name><name><surname>Ng</surname> <given-names>L</given-names></name><name><surname>Facer</surname> <given-names>BAC</given-names></name><name><surname>Lesnar</surname> <given-names>P</given-names></name><name><surname>Guillozet&#8208;Bongaarts</surname> <given-names>A</given-names></name><name><surname>McMurray</surname> <given-names>B</given-names></name><name><surname>Szafer</surname> <given-names>A</given-names></name><name><surname>Dolbeare</surname> <given-names>TA</given-names></name><name><surname>Stevens</surname> <given-names>A</given-names></name><name><surname>Tirrell</surname> <given-names>L</given-names></name><name><surname>Benner</surname> <given-names>T</given-names></name><name><surname>Caldejon</surname> <given-names>S</given-names></name><name><surname>Dalley</surname> <given-names>RA</given-names></name><name><surname>Dee</surname> <given-names>N</given-names></name><name><surname>Lau</surname> <given-names>C</given-names></name><name><surname>Nyhus</surname> <given-names>J</given-names></name><name><surname>Reding</surname> <given-names>M</given-names></name><name><surname>Riley</surname> <given-names>ZL</given-names></name><name><surname>Sandman</surname> <given-names>D</given-names></name><name><surname>Shen</surname> <given-names>E</given-names></name><name><surname>Kouwe</surname> <given-names>A</given-names></name><name><surname>Varjabedian</surname> <given-names>A</given-names></name><name><surname>Write</surname> <given-names>M</given-names></name><name><surname>Zollei</surname> <given-names>L</given-names></name><name><surname>Dang</surname> <given-names>C</given-names></name><name><surname>Knowles</surname> <given-names>JA</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Phillips</surname> <given-names>JW</given-names></name><name><surname>Sestan</surname> <given-names>N</given-names></name><name><surname>Wohnoutka</surname> <given-names>P</given-names></name><name><surname>Zielke</surname> <given-names>HR</given-names></name><name><surname>Hohmann</surname> <given-names>JG</given-names></name><name><surname>Jones</surname> <given-names>AR</given-names></name><name><surname>Bernard</surname> <given-names>A</given-names></name><name><surname>Hawrylycz</surname> <given-names>MJ</given-names></name><name><surname>Hof</surname> <given-names>PR</given-names></name><name><surname>Fischl</surname> <given-names>B</given-names></name><name><surname>Lein</surname> 
<given-names>ES</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Comprehensive cellular&#8208;resolution atlas of the adult human brain</article-title><source>Journal of Comparative Neurology</source><volume>524</volume><fpage>3127</fpage><lpage>3481</lpage><pub-id pub-id-type="doi">10.1002/cne.24080</pub-id></element-citation></ref><ref id="bib9"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>F&#252;rth</surname> <given-names>D</given-names></name><name><surname>Vaissi&#232;re</surname> <given-names>T</given-names></name><name><surname>Tzortzi</surname> <given-names>O</given-names></name><name><surname>Xuan</surname> <given-names>Y</given-names></name><name><surname>M&#228;rtin</surname> <given-names>A</given-names></name><name><surname>Lazaridis</surname> <given-names>I</given-names></name><name><surname>Spigolon</surname> <given-names>G</given-names></name><name><surname>Fisone</surname> <given-names>G</given-names></name><name><surname>Tomer</surname> <given-names>R</given-names></name><name><surname>Deisseroth</surname> <given-names>K</given-names></name><name><surname>Carl&#233;n</surname> <given-names>M</given-names></name><name><surname>Miller</surname> <given-names>CA</given-names></name><name><surname>Rumbaugh</surname> <given-names>G</given-names></name><name><surname>Meletis</surname> <given-names>K</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>An interactive framework for whole-brain maps at cellular resolution</article-title><source>Nature Neuroscience</source><volume>21</volume><fpage>139</fpage><lpage>149</lpage><pub-id pub-id-type="doi">10.1038/s41593-017-0027-7</pub-id><pub-id pub-id-type="pmid">29203898</pub-id></element-citation></ref><ref id="bib10"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goubran</surname> <given-names>M</given-names></name><name><surname>Leuze</surname> <given-names>C</given-names></name><name><surname>Hsueh</surname> <given-names>B</given-names></name><name><surname>Aswendt</surname> <given-names>M</given-names></name><name><surname>Ye</surname> <given-names>L</given-names></name><name><surname>Tian</surname> <given-names>Q</given-names></name><name><surname>Cheng</surname> <given-names>MY</given-names></name><name><surname>Crow</surname> <given-names>A</given-names></name><name><surname>Steinberg</surname> <given-names>GK</given-names></name><name><surname>McNab</surname> <given-names>JA</given-names></name><name><surname>Deisseroth</surname> <given-names>K</given-names></name><name><surname>Zeineh</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Multimodal image registration and connectivity analysis for integration of connectomic data from microscopy to MRI</article-title><source>Nature Communications</source><volume>10</volume><elocation-id>5504</elocation-id><pub-id pub-id-type="doi">10.1038/s41467-019-13374-0</pub-id><pub-id pub-id-type="pmid">31796741</pub-id></element-citation></ref><ref id="bib11"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Harris</surname> <given-names>CR</given-names></name><name><surname>Millman</surname> <given-names>KJ</given-names></name><name><surname>van der Walt</surname> <given-names>SJ</given-names></name><name><surname>Gommers</surname> 
<given-names>R</given-names></name><name><surname>Virtanen</surname> <given-names>P</given-names></name><name><surname>Cournapeau</surname> <given-names>D</given-names></name><name><surname>Wieser</surname> <given-names>E</given-names></name><name><surname>Taylor</surname> <given-names>J</given-names></name><name><surname>Berg</surname> <given-names>S</given-names></name><name><surname>Smith</surname> <given-names>NJ</given-names></name><name><surname>Kern</surname> <given-names>R</given-names></name><name><surname>Picus</surname> <given-names>M</given-names></name><name><surname>Hoyer</surname> <given-names>S</given-names></name><name><surname>van Kerkwijk</surname> <given-names>MH</given-names></name><name><surname>Brett</surname> <given-names>M</given-names></name><name><surname>Haldane</surname> <given-names>A</given-names></name><name><surname>Del R&#237;o</surname> <given-names>JF</given-names></name><name><surname>Wiebe</surname> <given-names>M</given-names></name><name><surname>Peterson</surname> <given-names>P</given-names></name><name><surname>G&#233;rard-Marchant</surname> <given-names>P</given-names></name><name><surname>Sheppard</surname> <given-names>K</given-names></name><name><surname>Reddy</surname> <given-names>T</given-names></name><name><surname>Weckesser</surname> <given-names>W</given-names></name><name><surname>Abbasi</surname> <given-names>H</given-names></name><name><surname>Gohlke</surname> <given-names>C</given-names></name><name><surname>Oliphant</surname> <given-names>TE</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Array programming with NumPy</article-title><source>Nature</source><volume>585</volume><fpage>357</fpage><lpage>362</lpage><pub-id pub-id-type="doi">10.1038/s41586-020-2649-2</pub-id><pub-id pub-id-type="pmid">32939066</pub-id></element-citation></ref><ref id="bib12"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hunter</surname> <given-names>JD</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>Matplotlib: a 2D graphics environment</article-title><source>Computing in Science &amp; Engineering</source><volume>9</volume><fpage>90</fpage><lpage>95</lpage><pub-id pub-id-type="doi">10.1109/MCSE.2007.55</pub-id></element-citation></ref><ref id="bib13"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>M</given-names></name><name><surname>Nguyen</surname> <given-names>JD</given-names></name><name><surname>Weber</surname> <given-names>SJ</given-names></name><name><surname>Mejias-Aponte</surname> <given-names>CA</given-names></name><name><surname>Madangopal</surname> <given-names>R</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title><italic>SMART: an open source extension of whole brain for iDISCO+ LSFM intact mouse brain registration and segmentation</italic></article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/727529</pub-id></element-citation></ref><ref id="bib14"><element-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kluyver</surname> <given-names>T</given-names></name><name><surname>Ragan-Kelley</surname> <given-names>B</given-names></name><name><surname>P&#233;rez</surname> <given-names>F</given-names></name><name><surname>Granger</surname> <given-names>B</given-names></name><name><surname>Bussonnier</surname> 
<given-names>M</given-names></name><name><surname>Frederic</surname> <given-names>J</given-names></name><name><surname>Kelley</surname> <given-names>K</given-names></name></person-group><year iso-8601-date="2016">2016</year><chapter-title>Jupyter Notebooks - a Publishing Format for Reproducible Computational Workflows</chapter-title><person-group person-group-type="editor"><name><surname>Loizides</surname> <given-names>Fernando</given-names></name><name><surname>Schmidt</surname> <given-names>Birgit</given-names></name></person-group><source>Positioning and Power in Academic Publishing: Players, Agents and Agendas</source><publisher-loc>Netherlands</publisher-loc><publisher-name>IOS Press</publisher-name><fpage>87</fpage><lpage>90</lpage></element-citation></ref><ref id="bib15"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kunst</surname> <given-names>M</given-names></name><name><surname>Laurell</surname> <given-names>E</given-names></name><name><surname>Mokayes</surname> <given-names>N</given-names></name><name><surname>Kramer</surname> <given-names>A</given-names></name><name><surname>Kubo</surname> <given-names>F</given-names></name><name><surname>Fernandes</surname> <given-names>AM</given-names></name><name><surname>F&#246;rster</surname> <given-names>D</given-names></name><name><surname>Dal Maschio</surname> <given-names>M</given-names></name><name><surname>Baier</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>A cellular-resolution atlas of the larval zebrafish brain</article-title><source>Neuron</source><volume>103</volume><fpage>21</fpage><lpage>38</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2019.04.034</pub-id><pub-id pub-id-type="pmid">31147152</pub-id></element-citation></ref><ref id="bib16"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lein</surname> <given-names>ES</given-names></name><name><surname>Hawrylycz</surname> <given-names>MJ</given-names></name><name><surname>Ao</surname> <given-names>N</given-names></name><name><surname>Ayres</surname> <given-names>M</given-names></name><name><surname>Bensinger</surname> <given-names>A</given-names></name><name><surname>Bernard</surname> <given-names>A</given-names></name><name><surname>Boe</surname> <given-names>AF</given-names></name><name><surname>Boguski</surname> <given-names>MS</given-names></name><name><surname>Brockway</surname> <given-names>KS</given-names></name><name><surname>Byrnes</surname> <given-names>EJ</given-names></name><name><surname>Chen</surname> <given-names>L</given-names></name><name><surname>Chen</surname> <given-names>L</given-names></name><name><surname>Chen</surname> <given-names>TM</given-names></name><name><surname>Chin</surname> <given-names>MC</given-names></name><name><surname>Chong</surname> <given-names>J</given-names></name><name><surname>Crook</surname> <given-names>BE</given-names></name><name><surname>Czaplinska</surname> <given-names>A</given-names></name><name><surname>Dang</surname> <given-names>CN</given-names></name><name><surname>Datta</surname> <given-names>S</given-names></name><name><surname>Dee</surname> <given-names>NR</given-names></name><name><surname>Desaki</surname> <given-names>AL</given-names></name><name><surname>Desta</surname> <given-names>T</given-names></name><name><surname>Diep</surname> <given-names>E</given-names></name><name><surname>Dolbeare</surname>
<given-names>TA</given-names></name><name><surname>Donelan</surname> <given-names>MJ</given-names></name><name><surname>Dong</surname> <given-names>HW</given-names></name><name><surname>Dougherty</surname> <given-names>JG</given-names></name><name><surname>Duncan</surname> <given-names>BJ</given-names></name><name><surname>Ebbert</surname> <given-names>AJ</given-names></name><name><surname>Eichele</surname> <given-names>G</given-names></name><name><surname>Estin</surname> <given-names>LK</given-names></name><name><surname>Faber</surname> <given-names>C</given-names></name><name><surname>Facer</surname> <given-names>BA</given-names></name><name><surname>Fields</surname> <given-names>R</given-names></name><name><surname>Fischer</surname> <given-names>SR</given-names></name><name><surname>Fliss</surname> <given-names>TP</given-names></name><name><surname>Frensley</surname> <given-names>C</given-names></name><name><surname>Gates</surname> <given-names>SN</given-names></name><name><surname>Glattfelder</surname> <given-names>KJ</given-names></name><name><surname>Halverson</surname> <given-names>KR</given-names></name><name><surname>Hart</surname> <given-names>MR</given-names></name><name><surname>Hohmann</surname> <given-names>JG</given-names></name><name><surname>Howell</surname> <given-names>MP</given-names></name><name><surname>Jeung</surname> <given-names>DP</given-names></name><name><surname>Johnson</surname> <given-names>RA</given-names></name><name><surname>Karr</surname> <given-names>PT</given-names></name><name><surname>Kawal</surname> <given-names>R</given-names></name><name><surname>Kidney</surname> <given-names>JM</given-names></name><name><surname>Knapik</surname> <given-names>RH</given-names></name><name><surname>Kuan</surname> <given-names>CL</given-names></name><name><surname>Lake</surname> <given-names>JH</given-names></name><name><surname>Laramee</surname> <given-names>AR</given-names></name><name><surname>Larsen</surname> <given-names>KD</given-names></name><name><surname>Lau</surname> <given-names>C</given-names></name><name><surname>Lemon</surname> <given-names>TA</given-names></name><name><surname>Liang</surname> <given-names>AJ</given-names></name><name><surname>Liu</surname> <given-names>Y</given-names></name><name><surname>Luong</surname> <given-names>LT</given-names></name><name><surname>Michaels</surname> <given-names>J</given-names></name><name><surname>Morgan</surname> <given-names>JJ</given-names></name><name><surname>Morgan</surname> <given-names>RJ</given-names></name><name><surname>Mortrud</surname> <given-names>MT</given-names></name><name><surname>Mosqueda</surname> <given-names>NF</given-names></name><name><surname>Ng</surname> <given-names>LL</given-names></name><name><surname>Ng</surname> <given-names>R</given-names></name><name><surname>Orta</surname> <given-names>GJ</given-names></name><name><surname>Overly</surname> <given-names>CC</given-names></name><name><surname>Pak</surname> <given-names>TH</given-names></name><name><surname>Parry</surname> <given-names>SE</given-names></name><name><surname>Pathak</surname> <given-names>SD</given-names></name><name><surname>Pearson</surname> <given-names>OC</given-names></name><name><surname>Puchalski</surname> <given-names>RB</given-names></name><name><surname>Riley</surname> <given-names>ZL</given-names></name><name><surname>Rockett</surname> <given-names>HR</given-names></name><name><surname>Rowland</surname> <given-names>SA</given-names></name><name><surname>Royall</surname> 
<given-names>JJ</given-names></name><name><surname>Ruiz</surname> <given-names>MJ</given-names></name><name><surname>Sarno</surname> <given-names>NR</given-names></name><name><surname>Schaffnit</surname> <given-names>K</given-names></name><name><surname>Shapovalova</surname> <given-names>NV</given-names></name><name><surname>Sivisay</surname> <given-names>T</given-names></name><name><surname>Slaughterbeck</surname> <given-names>CR</given-names></name><name><surname>Smith</surname> <given-names>SC</given-names></name><name><surname>Smith</surname> <given-names>KA</given-names></name><name><surname>Smith</surname> <given-names>BI</given-names></name><name><surname>Sodt</surname> <given-names>AJ</given-names></name><name><surname>Stewart</surname> <given-names>NN</given-names></name><name><surname>Stumpf</surname> <given-names>KR</given-names></name><name><surname>Sunkin</surname> <given-names>SM</given-names></name><name><surname>Sutram</surname> <given-names>M</given-names></name><name><surname>Tam</surname> <given-names>A</given-names></name><name><surname>Teemer</surname> <given-names>CD</given-names></name><name><surname>Thaller</surname> <given-names>C</given-names></name><name><surname>Thompson</surname> <given-names>CL</given-names></name><name><surname>Varnam</surname> <given-names>LR</given-names></name><name><surname>Visel</surname> <given-names>A</given-names></name><name><surname>Whitlock</surname> <given-names>RM</given-names></name><name><surname>Wohnoutka</surname> <given-names>PE</given-names></name><name><surname>Wolkey</surname> <given-names>CK</given-names></name><name><surname>Wong</surname> <given-names>VY</given-names></name><name><surname>Wood</surname> <given-names>M</given-names></name><name><surname>Yaylaoglu</surname> <given-names>MB</given-names></name><name><surname>Young</surname> <given-names>RC</given-names></name><name><surname>Youngstrom</surname> <given-names>BL</given-names></name><name><surname>Yuan</surname> <given-names>XF</given-names></name><name><surname>Zhang</surname> <given-names>B</given-names></name><name><surname>Zwingman</surname> <given-names>TA</given-names></name><name><surname>Jones</surname> <given-names>AR</given-names></name></person-group><year iso-8601-date="2007">2007</year><article-title>Genome-wide atlas of gene expression in the adult mouse brain</article-title><source>Nature</source><volume>445</volume><fpage>168</fpage><lpage>176</lpage><pub-id pub-id-type="doi">10.1038/nature05453</pub-id><pub-id pub-id-type="pmid">17151600</pub-id></element-citation></ref><ref id="bib17"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Markov</surname> <given-names>DA</given-names></name><name><surname>Kist</surname> <given-names>AM</given-names></name><name><surname>Petrucco</surname> <given-names>L</given-names></name><name><surname>Portugues</surname> <given-names>R</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>The cerebellum recalibrates a feedback controller involved in motor control</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2020.02.12.945956</pub-id></element-citation></ref><ref id="bib18"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mathis</surname> <given-names>A</given-names></name><name><surname>Mamidanna</surname> <given-names>P</given-names></name><name><surname>Cury</surname> <given-names>KM</given-names></name><name><surname>Abe</surname> 
<given-names>T</given-names></name><name><surname>Murthy</surname> <given-names>VN</given-names></name><name><surname>Mathis</surname> <given-names>MW</given-names></name><name><surname>Bethge</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>DeepLabCut: markerless pose estimation of user-defined body parts with deep learning</article-title><source>Nature Neuroscience</source><volume>21</volume><fpage>1281</fpage><lpage>1289</lpage><pub-id pub-id-type="doi">10.1038/s41593-018-0209-y</pub-id><pub-id pub-id-type="pmid">30127430</pub-id></element-citation></ref><ref id="bib19"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muller</surname> <given-names>E</given-names></name><name><surname>Bednar</surname> <given-names>JA</given-names></name><name><surname>Diesmann</surname> <given-names>M</given-names></name><name><surname>Gewaltig</surname> <given-names>MO</given-names></name><name><surname>Hines</surname> <given-names>M</given-names></name><name><surname>Davison</surname> <given-names>AP</given-names></name></person-group><year iso-8601-date="2015">2015</year><article-title>Python in neuroscience</article-title><source>Frontiers in Neuroinformatics</source><volume>9</volume><elocation-id>11</elocation-id><pub-id pub-id-type="doi">10.3389/fninf.2015.00011</pub-id><pub-id pub-id-type="pmid">25926788</pub-id></element-citation></ref><ref id="bib20"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Musy</surname> <given-names>M</given-names></name><name><surname>Dalmasso</surname> <given-names>G</given-names></name><name><surname>Sullivan</surname> <given-names>B</given-names></name></person-group><year iso-8601-date="2019">2019</year><data-title>Vedo, a Python Module for Scientific Visualization and Analysis of 3D Objects and Point Clouds Based on Vtk</data-title><version designator="2021.0.2">2021.0.2</version><publisher-name>Visualization Toolkit</publisher-name><ext-link ext-link-type="uri" xlink:href="https://pypi.org/project/vedo/">https://pypi.org/project/vedo/</ext-link></element-citation></ref><ref id="bib21"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oh</surname> <given-names>SW</given-names></name><name><surname>Harris</surname> <given-names>JA</given-names></name><name><surname>Ng</surname> <given-names>L</given-names></name><name><surname>Winslow</surname> <given-names>B</given-names></name><name><surname>Cain</surname> <given-names>N</given-names></name><name><surname>Mihalas</surname> <given-names>S</given-names></name><name><surname>Wang</surname> <given-names>Q</given-names></name><name><surname>Lau</surname> <given-names>C</given-names></name><name><surname>Kuan</surname> <given-names>L</given-names></name><name><surname>Henry</surname> <given-names>AM</given-names></name><name><surname>Mortrud</surname> <given-names>MT</given-names></name><name><surname>Ouellette</surname> <given-names>B</given-names></name><name><surname>Nguyen</surname> <given-names>TN</given-names></name><name><surname>Sorensen</surname> <given-names>SA</given-names></name><name><surname>Slaughterbeck</surname> <given-names>CR</given-names></name><name><surname>Wakeman</surname> <given-names>W</given-names></name><name><surname>Li</surname> <given-names>Y</given-names></name><name><surname>Feng</surname> <given-names>D</given-names></name><name><surname>Ho</surname> 
<given-names>A</given-names></name><name><surname>Nicholas</surname> <given-names>E</given-names></name><name><surname>Hirokawa</surname> <given-names>KE</given-names></name><name><surname>Bohn</surname> <given-names>P</given-names></name><name><surname>Joines</surname> <given-names>KM</given-names></name><name><surname>Peng</surname> <given-names>H</given-names></name><name><surname>Hawrylycz</surname> <given-names>MJ</given-names></name><name><surname>Phillips</surname> <given-names>JW</given-names></name><name><surname>Hohmann</surname> <given-names>JG</given-names></name><name><surname>Wohnoutka</surname> <given-names>P</given-names></name><name><surname>Gerfen</surname> <given-names>CR</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Bernard</surname> <given-names>A</given-names></name><name><surname>Dang</surname> <given-names>C</given-names></name><name><surname>Jones</surname> <given-names>AR</given-names></name><name><surname>Zeng</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2014">2014</year><article-title>A mesoscale connectome of the mouse brain</article-title><source>Nature</source><volume>508</volume><fpage>207</fpage><lpage>214</lpage><pub-id pub-id-type="doi">10.1038/nature13186</pub-id><pub-id pub-id-type="pmid">24695228</pub-id></element-citation></ref><ref id="bib22"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Pachitariu</surname> <given-names>M</given-names></name><name><surname>Stringer</surname> <given-names>C</given-names></name><name><surname>Dipoppa</surname> <given-names>M</given-names></name><name><surname>Schr&#246;der</surname> <given-names>S</given-names></name><name><surname>Rossi</surname> <given-names>LF</given-names></name><name><surname>Dalgleish</surname> <given-names>H</given-names></name><name><surname>Carandini</surname> <given-names>M</given-names></name><name><surname>Harris</surname> <given-names>KD</given-names></name></person-group><year iso-8601-date="2017">2017</year><article-title>Suite2p: beyond 10,000 neurons with standard Two-Photon microscopy</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/061507</pub-id></element-citation></ref><ref id="bib23"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Renier</surname> <given-names>N</given-names></name><name><surname>Adams</surname> <given-names>EL</given-names></name><name><surname>Kirst</surname> <given-names>C</given-names></name><name><surname>Wu</surname> <given-names>Z</given-names></name><name><surname>Azevedo</surname> <given-names>R</given-names></name><name><surname>Kohl</surname> <given-names>J</given-names></name><name><surname>Autry</surname> <given-names>AE</given-names></name><name><surname>Kadiri</surname> <given-names>L</given-names></name><name><surname>Umadevi Venkataraju</surname> <given-names>K</given-names></name><name><surname>Zhou</surname> <given-names>Y</given-names></name><name><surname>Wang</surname> <given-names>VX</given-names></name><name><surname>Tang</surname> <given-names>CY</given-names></name><name><surname>Olsen</surname> <given-names>O</given-names></name><name><surname>Dulac</surname> <given-names>C</given-names></name><name><surname>Osten</surname> <given-names>P</given-names></name><name><surname>Tessier-Lavigne</surname> <given-names>M</given-names></name></person-group><year iso-8601-date="2016">2016</year><article-title>Mapping of brain 
activity by automated volume analysis of immediate early genes</article-title><source>Cell</source><volume>165</volume><fpage>1789</fpage><lpage>1802</lpage><pub-id pub-id-type="doi">10.1016/j.cell.2016.05.007</pub-id><pub-id pub-id-type="pmid">27238021</pub-id></element-citation></ref><ref id="bib24"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Shamash</surname> <given-names>P</given-names></name><name><surname>Carandini</surname> <given-names>M</given-names></name><name><surname>Harris</surname> <given-names>K</given-names></name><name><surname>Steinmetz</surname> <given-names>N</given-names></name></person-group><year iso-8601-date="2018">2018</year><article-title>A tool for analyzing electrode tracks from slice histology</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/447995</pub-id></element-citation></ref><ref id="bib25"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Simmons</surname> <given-names>DM</given-names></name><name><surname>Swanson</surname> <given-names>LW</given-names></name></person-group><year iso-8601-date="2009">2009</year><article-title>Comparing histological data from different brains: sources of error and strategies for minimizing them</article-title><source>Brain Research Reviews</source><volume>60</volume><fpage>349</fpage><lpage>367</lpage><pub-id pub-id-type="doi">10.1016/j.brainresrev.2009.02.002</pub-id><pub-id pub-id-type="pmid">19248810</pub-id></element-citation></ref><ref id="bib26"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Sofroniew</surname> <given-names>N</given-names></name><name><surname>Lambert</surname> <given-names>T</given-names></name></person-group><year iso-8601-date="2020">2020</year><data-title>Napari/Napari</data-title><source>Zenodo</source><version designator="0.3.8rc1">0.3.8rc1</version><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5281/zenodo.4046812">https://doi.org/10.5281/zenodo.4046812</ext-link></element-citation></ref><ref id="bib27"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>JH</given-names></name><name><surname>Choi</surname> <given-names>W</given-names></name><name><surname>Song</surname> <given-names>YH</given-names></name><name><surname>Kim</surname> <given-names>JH</given-names></name><name><surname>Jeong</surname> <given-names>D</given-names></name><name><surname>Lee</surname> <given-names>SH</given-names></name><name><surname>Paik</surname> <given-names>SB</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Precise mapping of single neurons by calibrated 3D reconstruction of brain slices reveals topographic projection in mouse visual cortex</article-title><source>Cell Reports</source><volume>31</volume><elocation-id>107682</elocation-id><pub-id pub-id-type="doi">10.1016/j.celrep.2020.107682</pub-id><pub-id pub-id-type="pmid">32460016</pub-id></element-citation></ref><ref id="bib28"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steinmetz</surname> <given-names>NA</given-names></name><name><surname>Zatka-Haas</surname> <given-names>P</given-names></name><name><surname>Carandini</surname> <given-names>M</given-names></name><name><surname>Harris</surname> <given-names>KD</given-names></name></person-group><year 
iso-8601-date="2019">2019</year><article-title>Distributed coding of choice, action and engagement across the mouse brain</article-title><source>Nature</source><volume>576</volume><fpage>266</fpage><lpage>273</lpage><pub-id pub-id-type="doi">10.1038/s41586-019-1787-x</pub-id><pub-id pub-id-type="pmid">31776518</pub-id></element-citation></ref><ref id="bib29"><element-citation publication-type="software"><person-group person-group-type="author"><collab>The pandas development team</collab></person-group><year iso-8601-date="2020">2020</year><data-title>Pandas-Dev/Pandas: Pandas</data-title><source>Zenodo</source><version designator="1.2.3">1.2.3</version><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5281/zenodo.3509134">https://doi.org/10.5281/zenodo.3509134</ext-link></element-citation></ref><ref id="bib30"><element-citation publication-type="preprint"><person-group person-group-type="author"><name><surname>Tyson</surname> <given-names>AL</given-names></name><name><surname>Rousseau</surname> <given-names>CV</given-names></name><name><surname>Niedworok</surname> <given-names>CJ</given-names></name></person-group><year iso-8601-date="2020">2020a</year><article-title>A deep learning algorithm for 3D cell detection in whole mouse brain image datasets</article-title><source>bioRxiv</source><pub-id pub-id-type="doi">10.1101/2020.10.21.348771</pub-id></element-citation></ref><ref id="bib31"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Tyson</surname> <given-names>AL</given-names></name><name><surname>Rousseau</surname> <given-names>CV</given-names></name></person-group><year iso-8601-date="2020">2020b</year><data-title>brainreg: automated 3D brain registration with support for multiple species and atlases</data-title><source>Zenodo</source><version designator="0.1.5">0.1.5</version><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5281/zenodo.3991718">https://doi.org/10.5281/zenodo.3991718</ext-link></element-citation></ref><ref id="bib32"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Q</given-names></name><name><surname>Ding</surname> <given-names>SL</given-names></name><name><surname>Li</surname> <given-names>Y</given-names></name><name><surname>Royall</surname> <given-names>J</given-names></name><name><surname>Feng</surname> <given-names>D</given-names></name><name><surname>Lesnar</surname> <given-names>P</given-names></name><name><surname>Graddis</surname> <given-names>N</given-names></name><name><surname>Naeemi</surname> <given-names>M</given-names></name><name><surname>Facer</surname> <given-names>B</given-names></name><name><surname>Ho</surname> <given-names>A</given-names></name><name><surname>Dolbeare</surname> <given-names>T</given-names></name><name><surname>Blanchard</surname> <given-names>B</given-names></name><name><surname>Dee</surname> <given-names>N</given-names></name><name><surname>Wakeman</surname> <given-names>W</given-names></name><name><surname>Hirokawa</surname> <given-names>KE</given-names></name><name><surname>Szafer</surname> <given-names>A</given-names></name><name><surname>Sunkin</surname> <given-names>SM</given-names></name><name><surname>Oh</surname> <given-names>SW</given-names></name><name><surname>Bernard</surname> <given-names>A</given-names></name><name><surname>Phillips</surname> <given-names>JW</given-names></name><name><surname>Hawrylycz</surname> 
<given-names>M</given-names></name><name><surname>Koch</surname> <given-names>C</given-names></name><name><surname>Zeng</surname> <given-names>H</given-names></name><name><surname>Harris</surname> <given-names>JA</given-names></name><name><surname>Ng</surname> <given-names>L</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>The Allen mouse brain common coordinate framework: a 3D reference atlas</article-title><source>Cell</source><volume>181</volume><fpage>936</fpage><lpage>953</lpage><pub-id pub-id-type="doi">10.1016/j.cell.2020.04.007</pub-id><pub-id pub-id-type="pmid">32386544</pub-id></element-citation></ref><ref id="bib33"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Winnubst</surname> <given-names>J</given-names></name><name><surname>Bas</surname> <given-names>E</given-names></name><name><surname>Ferreira</surname> <given-names>TA</given-names></name><name><surname>Wu</surname> <given-names>Z</given-names></name><name><surname>Economo</surname> <given-names>MN</given-names></name><name><surname>Edson</surname> <given-names>P</given-names></name><name><surname>Arthur</surname> <given-names>BJ</given-names></name><name><surname>Bruns</surname> <given-names>C</given-names></name><name><surname>Rokicki</surname> <given-names>K</given-names></name><name><surname>Schauder</surname> <given-names>D</given-names></name><name><surname>Olbris</surname> <given-names>DJ</given-names></name><name><surname>Murphy</surname> <given-names>SD</given-names></name><name><surname>Ackerman</surname> <given-names>DG</given-names></name><name><surname>Arshadi</surname> <given-names>C</given-names></name><name><surname>Baldwin</surname> <given-names>P</given-names></name><name><surname>Blake</surname> <given-names>R</given-names></name><name><surname>Elsayed</surname> <given-names>A</given-names></name><name><surname>Hasan</surname> <given-names>M</given-names></name><name><surname>Ramirez</surname> <given-names>D</given-names></name><name><surname>Dos Santos</surname> <given-names>B</given-names></name><name><surname>Weldon</surname> <given-names>M</given-names></name><name><surname>Zafar</surname> <given-names>A</given-names></name><name><surname>Dudman</surname> <given-names>JT</given-names></name><name><surname>Gerfen</surname> <given-names>CR</given-names></name><name><surname>Hantman</surname> <given-names>AW</given-names></name><name><surname>Korff</surname> <given-names>W</given-names></name><name><surname>Sternson</surname> <given-names>SM</given-names></name><name><surname>Spruston</surname> <given-names>N</given-names></name><name><surname>Svoboda</surname> <given-names>K</given-names></name><name><surname>Chandrashekar</surname> <given-names>J</given-names></name></person-group><year iso-8601-date="2019">2019</year><article-title>Reconstruction of 1,000 projection neurons reveals new cell types and organization of long-range connectivity in the mouse brain</article-title><source>Cell</source><volume>179</volume><fpage>268</fpage><lpage>281</lpage><pub-id pub-id-type="doi">10.1016/j.cell.2019.07.042</pub-id><pub-id pub-id-type="pmid">31495573</pub-id></element-citation></ref><ref id="bib34"><element-citation publication-type="software"><person-group person-group-type="author"><name><surname>Yaoyao</surname> <given-names>H</given-names></name></person-group><year iso-8601-date="2020">2020</year><data-title>BrainMesh: A Matlab GUI for Rendering 3D Mouse Brain
Structures</data-title><source>GitHub</source><version designator="3.0">3.0</version><ext-link ext-link-type="uri" xlink:href="https://github.com/Yaoyao-Hao/BrainMesh">https://github.com/Yaoyao-Hao/BrainMesh</ext-link></element-citation></ref><ref id="bib35"><element-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Young</surname> <given-names>DM</given-names></name><name><surname>Duhn</surname> <given-names>C</given-names></name><name><surname>Gilson</surname> <given-names>M</given-names></name><name><surname>Nojima</surname> <given-names>M</given-names></name><name><surname>Yuruk</surname> <given-names>D</given-names></name><name><surname>Kumar</surname> <given-names>A</given-names></name><name><surname>Yu</surname> <given-names>W</given-names></name><name><surname>Sanders</surname> <given-names>SJ</given-names></name></person-group><year iso-8601-date="2020">2020</year><article-title>Whole-brain image analysis and anatomical atlas 3D generation using MagellanMapper</article-title><source>Current Protocols in Neuroscience</source><volume>94</volume><elocation-id>e104</elocation-id><pub-id pub-id-type="doi">10.1002/cpns.104</pub-id><pub-id pub-id-type="pmid">32981139</pub-id></element-citation></ref></ref-list></back><sub-article article-type="decision-letter" id="sa1"><front-stub><article-id pub-id-type="doi">10.7554/eLife.65751.sa1</article-id><title-group><article-title>Decision letter</article-title></title-group><contrib-group><contrib contrib-type="editor"><name><surname>Mathis</surname><given-names>Mackenzie W</given-names></name><role>Reviewing Editor</role><aff><institution>EPFL</institution><country>Switzerland</country></aff></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name><surname>Nunez-Iglesias</surname><given-names>Juan</given-names> </name><role>Reviewer</role><aff><institution/></aff></contrib></contrib-group></front-stub><body><boxed-text><p>Our editorial process produces two outputs: (i) <ext-link ext-link-type="uri" xlink:href="https://sciety.org/articles/activity/10.1101/2020.02.23.961748">public reviews</ext-link> designed to be posted alongside <ext-link ext-link-type="uri" xlink:href="https://www.biorxiv.org/content/10.1101/2020.02.23.961748v2">the preprint</ext-link> for the benefit of readers; (ii) feedback on the manuscript for the authors, including requests for revisions, shown below. We also include an acceptance summary that explains what the editors found interesting or important about the work.</p></boxed-text><p><bold>Acceptance summary:</bold></p><p>Claudi et al. present a new tool for visualizing brain maps. In the era of new technologies to clear and analyze brains of model organisms, new tools are becoming increasingly important for researchers to interact with this data. Here, the authors report on a new Python-based tool for just this: exploring, visualizing, and rendering this high-dimensional (and large) data. Moreover, the authors provide rendering performance benchmarking, open source code, and extensive documentation. We believe this tool will be of great interest to researchers who need to visualize multiple brains within several key model organisms, and is written such that it can be adopted rapidly by the neuroscientific community.</p><p><bold>Decision letter after peer review:</bold></p><p>Thank you for submitting your article "Brainrender: a python-based software for visualizing anatomically registered data" for consideration by <italic>eLife</italic>.
Your article has been reviewed by three peer reviewers, one of whom is a member of our Board of Reviewing Editors, and the evaluation has been overseen by Kate Wassum as the Senior Editor. The following individual involved in review of your submission has agreed to reveal their identity: Juan Nunez-Iglesias (Reviewer #2).</p><p>The reviewers have discussed their reviews with one another, and the Reviewing Editor has drafted this letter to help you prepare a revised submission.</p><p>Essential Revisions:</p><p>1) All reviews highlight some additional information required regarding usability (which file types are already supported out of the box, computational resources, run times, minimal example code in methods, how hard/easy is it given one's level of coding, etc.). We agreed these are critical. Please address each of these concerns.</p><p>2) 2 of 3 reviewers mention the need for citations of dependencies, so please address both reviewer #1 and #2's specific comments on this below.</p><p>3) Lastly, please consider revising the Conclusion/Discussion section. Some text feels redundant, and this space could be used to discuss limitations and future expansions more directly. Each review highlights some very nice strengths and some limitations, so please consider the recommendations when you edit this section.</p><p>Please include a key resource table if you have not already done so.</p><p><italic>Reviewer #2 (Recommendations for the authors):</italic></p><p>The paper does a great job of describing the need for this new software, as well as most of the capabilities of the software. I did wonder specifically which file formats, other than .obj and .stl, were supported. Specifically, in subsection &#8220;Design principles and implementation&#8221;, it would be good to introduce at least one sentence about the exact IO capabilities of <italic>brainrender</italic> &#8220;out of the box&#8221;, as well as noting that the full scientific Python ecosystem can provide support for other formats. In fact, a whole section/paragraph about how to import data from custom file formats (e.g. a .czi file?) would be most welcome. I expect the first question of many readers will be "how do I get my data into this to try it out?"</p><p>The whole Conclusions section felt rather redundant. Even though this is common in scientific writing, I'd remove most of it, reduce it to one sentence, and instead elaborate on "limitations and future directions". I expect the authors can certainly come up with more ideas there (e.g. IO plugins?), as well as existing bugs (e.g. Video 4 has some distracting flickering on the electrodes &#8211; it's unclear to me whether this is expected, but I suspect not). Additionally, I recommend using this section to point out that readers can and should get involved with future development. The line "We welcome any user to&#8230;" should be a paragraph in future directions rather than a one-liner in the Materials and methods. "We welcome users to submit bug reports and feature requests on our GitHub issues page, as well as usage questions on image.sc. Further, given the completely open source nature of the software, we especially welcome users who would like to help us improve the software for their use case&#8230;" In my experience, most biologists don't see themselves as capable of doing this, and most of those are wrong, so this is a good space to dispel that notion.</p><p>As an aside, the references need work. Many are incomplete, including two from this paper's co-authors (Tyson 2020a/b).
Additionally, although NumPy is appropriately cited, several other packages are not:</p><p>&#8211; matplotlib is used for its colormaps; this should be mentioned in the methods and matplotlib cited.</p><p>&#8211; Jupyter is used for documentation and the Scene has some Jupyter compatibility, but Jupyter is not cited. Citation info for Jupyter appears to be discussed in this github comment: https://github.com/jupyter/jupyter/issues/190#issuecomment-721264013</p><p>&#8211; Pandas is used but not cited.</p><p>&#8211; I notice napari is used but only for its theme &#8211; I'd say it's appropriate to not cite it. Indeed, I would recommend the authors remove that dependency and instead copy the very small amount of information that they need from it.</p><p>A good resource for citation info in the scientific Python ecosystem is found here:</p><p>https://www.scipy.org/citing.html</p><p>I would also recommend naming NumPy specifically in the Materials and methods section (as well as the above packages), as I initially missed that the authors had appropriately cited it &#8211; just buried under the generic "standard python packages".</p><p>Finally, although there is a screenshot of the <italic>brainrender</italic> UI, *and* the supplementary videos show renderings created with <italic>brainrender</italic>, there is not a screen-captured video demonstrating the <italic>brainrender</italic> UI being used to generate a video. I would suggest including one as part of the supplementary materials, as that is something that many readers will be looking for &#8211; it is very hard to convey the usability of GUI software using text alone.</p><p>All in all, those are all easily fixed points, and I would encourage publication of the paper.</p><p><italic>Reviewer #3 (Recommendations for the authors):</italic></p><p>1) Emphasize the importance and difficulty of having accurately registered data. In our experience, this is the hardest part of the process, and it is just lightly discussed as a requirement for using the tool.</p><p>2) They state that it is easy to incorporate another atlas through the brainreg software, which can then be used with <italic>brainrender</italic>. As mentioned, it is our opinion that this is not a straightforward task and that it would require significant programming skills to implement. Please provide more direction about how this can be done and what the constraints are.</p><p>3) One of the stated advantages of this software is the ability to visualize multiple types of data and data from sources that are external to the atlas generators. Such visualizations can potentially be used to reveal consistency and/or novelty across data types. However, ultimately you would want to be able to measure these differences. Including examples about how to extract and compare features across data types and then visualize those differences with <italic>brainrender</italic> would strengthen the paper.</p><p>4) The snapshots of code presented as figures don't add much to the manuscript. Consider highlighting how-to videos instead.</p><p>5) As stated in earlier comments, some of the functionality is not clear, particularly for people unfamiliar or new to 3D visualization or coding in general. Even experienced developers would benefit from specs for the various input data types, for example. 
A little more explanation, particularly in cases where the interface is different (such as with the helper functions used to make some actors), can greatly improve the user experience.</p><p>6) Though the purpose of this tool is not to develop a registration algorithm for anatomical reference atlases, or to perform data analysis, we view these as the most difficult and necessary steps in this process. 3D rendered data visualizations are informative, but not meaningful without accurate registration to begin with and without quantitative analysis to back it up. As they point out in the introduction, other, similar tools (natverse, MagellanMapper) have both visualization and analysis capabilities.</p></body></sub-article><sub-article article-type="reply" id="sa2"><front-stub><article-id pub-id-type="doi">10.7554/eLife.65751.sa2</article-id><title-group><article-title>Author response</article-title></title-group></front-stub><body><disp-quote content-type="editor-comment"><p>Essential Revisions:</p><p>1) All reviews highlight some additional information required regarding usability (which file types are already supported out of the box, computational resources, run times, minimal example code in Materials and methods, how hard/easy it is given one's level of coding, etc.). We agreed these are critical. Please address each of these concerns.</p><p>2) 2 of 3 reviewers mention the need for citations of dependencies, so please address both reviewer #1 and #2's specific comments on this below.</p><p>3) Lastly, please consider revising the Conclusion/Discussion section. Some text feels redundant, and this space could be used to discuss limitations and future expansions more directly. Each review highlights some very nice strengths and some limitations, so please consider the recommendations when you edit this section.</p><p>Please include a key resource table if you have not already done so.</p></disp-quote><p>We are grateful for the reviewers&#8217; comments, which we found very helpful for improving the manuscript, as well as some of the code base and online documentation. We have addressed all of the concerns raised by including information about usability where this was missing, running benchmarking tests, citing references relevant to brainrender&#8217;s dependencies, and extensively revising the Discussion section to focus on the software&#8217;s strengths, limitations and opportunities for future improvements.</p><p>We now also include a key resource table.</p><disp-quote content-type="editor-comment"><p>Reviewer #2 (Recommendations for the authors):</p><p>The paper does a great job of describing the need for this new software, as well as most of the capabilities of the software. I did wonder specifically which file formats, other than .obj and .stl, were supported. Specifically, in subsection &#8220;Design principles and implementation&#8221;, it would be good to introduce at least one sentence about the exact IO capabilities of brainrender *out of the box*, as well as noting that the full scientific Python ecosystem can provide support for other formats. In fact, a whole section/paragraph about how to import data from custom file formats (e.g. a .czi file?) would be most welcome. 
I expect the first question of many readers will be "how do I get my data into this to try it out?"</p></disp-quote><p>Yes, this is a very good point, and we have now added a whole new section to the online documentation detailing the available options for getting data into <italic>brainrender</italic>: <ext-link ext-link-type="uri" xlink:href="https://docs.brainrender.info/usage/using-your-data">https://docs.brainrender.info/usage/using-your-data</ext-link>. We have also added new examples to the online repository illustrating how users can load and re-orient their data to visualize them in <italic>brainrender</italic> alongside atlas data.</p><p>In brief, <italic>brainrender</italic> can directly load cell coordinates and image (volume) data from .npy files, streamlines data from .json files (provided that they are in the correct format, as now stated in the online documentation), and neuron morphology data from .swc files (using morphapi). As suggested by the reviewer, we now include this information in the manuscript. We now also point out that the wider python ecosystem provides libraries for loading the most commonly used file formats, and that <italic>brainglobe</italic> provides <italic>Imio</italic> as a convenient tool for loading anatomical data.</p>
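<p>For illustration, here is a minimal sketch of this loading pathway, following the documented Scene and Points classes (the file name and styling parameters are hypothetical):</p><preformat>import numpy as np
from brainrender import Scene
from brainrender.actors import Points

# Create a scene with a BrainGlobe atlas (here: Allen mouse brain, 25 um)
scene = Scene(atlas_name="allen_mouse_25um")

# Load registered cell coordinates (an n_cells x 3 array, in atlas
# coordinates) from a .npy file and render them as small spheres
cells = np.load("my_registered_cells.npy")
scene.add(Points(cells, radius=20, colors="salmon"))

scene.render()</preformat>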
<p>In the revised manuscript we also emphasize the critical data pre-processing steps that must be done before using <italic>brainrender</italic> (e.g., registration to an atlas template). As pointed out by another reviewer, these can be challenging for some users, and thus we now also briefly mention <italic>brainreg</italic> and <italic>bg-space</italic> as two tools from the <italic>brainglobe</italic> suite that can facilitate these preprocessing steps.</p><disp-quote content-type="editor-comment"><p>The whole Conclusions section felt rather redundant. Even though this is common in scientific writing, I'd remove most of it, reduce it to one sentence, and instead elaborate on "limitations and future directions". I expect the authors can certainly come up with more ideas there (e.g. IO plugins?), as well as existing bugs (e.g. Video 4 has some distracting flickering on the electrodes &#8211; it's unclear to me whether this is expected, but I suspect not). Additionally, I recommend using this section to point out that readers can and should get involved with future development. The line "We welcome any user to&#8230;" should be a paragraph in future directions rather than a one-liner in the Materials and methods. "We welcome users to submit bug reports and feature requests on our GitHub issues page, as well as usage questions on image.sc. Further, given the completely open source nature of the software, we especially welcome users who would like to help us improve the software for their use case&#8230;" In my experience, most biologists don't see themselves as capable of doing this, and most of those are wrong, so this is a good space to dispel that notion.</p></disp-quote><p>We have taken the reviewer&#8217;s advice and replaced the Conclusions section with an expanded subsection &#8220;Limitations and future directions&#8221;, which now discusses additional topics such as rendering performance and the processing steps required before data can be used meaningfully in <italic>brainrender</italic>.</p><p>Incidentally, the &#8220;flickering&#8221; observed in Video 4 was not a bug: it was meant to show which probe channels detected a spike at any given moment in time (we now realise that the video legends were not appended to the video; we apologise for this and have included video legends in a separate submission file). We have now edited the video so that the highlighted channels are updated every 5 seconds (instead of every frame), hopefully improving the video&#8217;s quality.</p><p>We strongly agree that development should be open to all, and in particular to the end users who benefit the most from improvements in the software. We also very much agree with the comment on the perceived programming ability of most biologists. As suggested by the reviewer, we have emphasized this point in the discussion and explicitly invited users to contribute regardless of their programming experience. We now write:</p><p>&#8220;While we plan to continue developing <italic>brainrender</italic> in the future, we welcome contributions from the community. Users should feel encouraged to contribute irrespective of their programming experience, and we note that the programming ability of many biologists is often better than they perceive it to be. We especially welcome contributions aimed at improving the user experience of <italic>brainrender</italic>, at any level of interaction. Contributions can involve active development of <italic>brainrender's</italic> code base, but they can also be bug reports, feature requests, improvements to the online documentation, and help answering users' questions.&#8221;</p><disp-quote content-type="editor-comment"><p>As an aside, the references need work. Many are incomplete, including two from this paper's co-authors (Tyson 2020a/b). Additionally, although NumPy is appropriately cited, several other packages are not:</p><p>&#8211; matplotlib is used for its colormaps; this should be mentioned in the methods and matplotlib cited.</p><p>&#8211; Jupyter is used for documentation and the Scene has some Jupyter compatibility, but Jupyter is not cited. Citation info for Jupyter appears to be discussed in this github comment: https://github.com/jupyter/jupyter/issues/190#issuecomment-721264013</p><p>&#8211; Pandas is used but not cited.</p><p>&#8211; I notice napari is used but only for its theme &#8211; I'd say it's appropriate to not cite it. Indeed, I would recommend the authors remove that dependency and instead copy the very small amount of information that they need from it.</p><p>A good resource for citation info in the scientific Python ecosystem is found here:</p><p>https://www.scipy.org/citing.html</p><p>I would also recommend naming NumPy specifically in the methods section (as well as the above packages), as I initially missed that the authors had appropriately cited it &#8211; just buried under the generic "standard python packages".</p></disp-quote><p>Thank you for pointing these out; we apologize for these shortcomings, which we have now corrected. As suggested by the reviewer, we have also updated <italic>brainrender</italic>&#8217;s code to remove the dependency on <italic>napari</italic>.</p><disp-quote content-type="editor-comment"><p>Finally, although there is a screenshot of the brainrender UI, *and* the supplementary videos show renderings created with brainrender, there is not a screen-captured video demonstrating the brainrender UI being used to generate a video. I would suggest including one as part of the supplementary materials, as that is something that many readers will be looking for &#8211; it is very hard to convey the usability of GUI software using text alone.</p></disp-quote><p>We have added a supplementary video (Video 5) illustrating the main functionality supported by the GUI.</p><disp-quote content-type="editor-comment"><p>All in all, those are all easily fixed points, and I would encourage publication of the paper.</p></disp-quote><p>Thank you for the support and for raising these points.</p><disp-quote content-type="editor-comment"><p>Reviewer #3 (Recommendations for the authors):</p><p>1) Emphasize the importance and difficulty of having accurately registered data. In our experience, this is the hardest part of the process, and it is just lightly discussed as a requirement for using the tool.</p></disp-quote><p>We have expanded the discussion on data registration in subsection &#8220;Limitations and future directions&#8221; of the revised manuscript:</p><p>&#8220;With <italic>brainrender</italic> we aimed to make the rendering process as simple as possible. Nevertheless, some more technically demanding pre-processing steps of raw image data are necessary before these data can be visualized in <italic>brainrender</italic>. In particular, a critical step for visualizing anatomical data is the registration to a reference template (e.g., one of the atlases provided by the AtlasAPI). While this step can be challenging and time-consuming, the brainglobe suite provides software to facilitate this process (e.g., brainreg and bg-space), and alternative software tools have been developed before for this purpose (e.g., Song et al., 2020; Jin et al., 2019). Additional information about data registration can be found in brainglobe's and brainrender's online documentation, as well as in the examples in the brainrender GitHub repository.&#8221;</p><p>In addition, we have added a new section to the online documentation detailing how users can visualize their data in brainrender: <ext-link ext-link-type="uri" xlink:href="https://docs.brainrender.info/usage/using-your-data">https://docs.brainrender.info/usage/using-your-data</ext-link>, and we have added new examples to the online repository illustrating how users can load and re-orient their data to visualize them in <italic>brainrender</italic> alongside atlas data.</p>
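<p>As an illustration of the re-orientation step, a short sketch using <italic>bg-space</italic> could look as follows (the orientation strings and file name are assumptions; users should substitute their own acquisition convention):</p><preformat>import numpy as np
import bgspace as bgs

# Image stack acquired in an assumed anterior-superior-left ("asl") orientation
stack = np.load("my_image_stack.npy")
source_space = bgs.AnatomicalSpace("asl")

# Re-orient the stack to a target atlas convention,
# e.g. inferior-posterior-left ("ipl")
atlas_oriented_stack = source_space.map_stack_to("ipl", stack)</preformat>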
<disp-quote content-type="editor-comment"><p>2) They state that it is easy to incorporate another atlas through the brainreg software, which can then be used with brainrender. As mentioned, it is our opinion that this is not a straightforward task and that it would require significant programming skills to implement. Please provide more direction about how this can be done and what the constraints are.</p></disp-quote><p>We take the reviewer&#8217;s point and we have now removed the wording &#8220;new atlases can be easily adapted to work with the API&#8221; from the Results section. We have also edited the Discussion to directly address this issue:</p><p>&#8220;A related challenge is integrating new anatomical atlases into the AtlasAPI. While we anticipate that most users will not have this need, it is a non-trivial task that requires considerable programming skills. We believe that brainglobe&#8217;s AtlasAPI greatly facilitates this process, which is presented in Claudi et al., 2020 and has extensive online documentation (<ext-link ext-link-type="uri" xlink:href="https://docs.brainglobe.info/bg-atlasapi/introduction">https://docs.brainglobe.info/bg-atlasapi/introduction</ext-link>).&#8221;</p>
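<p>Once an atlas has been packaged for the AtlasAPI, it becomes available to <italic>brainrender</italic> by name alone. As a sketch (the atlas name is taken from the AtlasAPI listing; the region acronym is hypothetical):</p><preformat>from bg_atlasapi import show_atlases
from brainrender import Scene

# Print all atlases currently packaged for the BrainGlobe AtlasAPI
show_atlases()

# Any packaged atlas can then be used in brainrender by name,
# e.g. the larval zebrafish atlas
scene = Scene(atlas_name="mpin_zfish_1um")
scene.add_brain_region("tectum", alpha=0.3)  # region acronym is an assumption
scene.render()</preformat>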
<disp-quote content-type="editor-comment"><p>3) One of the stated advantages of this software is the ability to visualize multiple types of data and data from sources that are external to the atlas generators. Such visualizations can potentially be used to reveal consistency and/or novelty across data types. However, ultimately you would want to be able to measure these differences. Including examples about how to extract and compare features across data types and then visualize those differences with brainrender would strengthen the paper.</p></disp-quote><p>While noting that <italic>brainrender</italic> is intended to be mainly a visualisation tool, we have now expanded the subsection &#8220;Example code&#8221; to include two examples of how simple analyses can be performed with <italic>brainrender</italic> (new Figure 6). One of the examples shows how to compute the distance between the centres of mass of two injection sites, and the other shows how to extract the brain location of specific channels of a silicon probe.</p>
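<p>Schematically, such analyses follow the pattern below (a sketch only; file names are hypothetical, and the centre-of-mass method name depends on the installed vedo version):</p><preformat>import numpy as np
from vedo import load
from bg_atlasapi import BrainGlobeAtlas

# Distance between the centres of mass of two registered injection-site meshes
inj1 = load("injection_site_1.obj")
inj2 = load("injection_site_2.obj")
distance_um = np.linalg.norm(
    np.array(inj1.centerOfMass()) - np.array(inj2.centerOfMass())
)

# Brain region of each silicon-probe channel, via the BrainGlobe AtlasAPI
atlas = BrainGlobeAtlas("allen_mouse_25um")
channels = np.load("probe_channel_coordinates.npy")  # n_channels x 3, in um
regions = [
    atlas.structure_from_coords(xyz, microns=True, as_acronym=True)
    for xyz in channels
]</preformat>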
<disp-quote content-type="editor-comment"><p>4) The snapshots of code presented as figures don't add much to the manuscript. Consider highlighting how-to videos instead.</p></disp-quote><p>We have followed the reviewer&#8217;s suggestion and added a supplementary video illustrating how to use <italic>brainrender</italic>&#8217;s GUI (Video 5). Since another reviewer had the opposite opinion on the usefulness of the code snapshots, we have opted to keep them in the manuscript as well.</p><disp-quote content-type="editor-comment"><p>5) As stated in earlier comments, some of the functionality is not clear, particularly for people unfamiliar or new to 3D visualization or coding in general. Even experienced developers would benefit from specs for the various input data types, for example. A little more explanation, particularly in cases where the interface is different (such as with the helper functions used to make some actors), can greatly improve the user experience.</p></disp-quote><p>We thank the reviewer for pointing out places in which the code and the documentation were not clear. We have now expanded the online documentation. In particular, the point about clarifying what data types are supported and how data can be loaded into <italic>brainrender</italic> was also raised by another reviewer, and as we mention in the reply to point 1, the documentation now includes a section dedicated to IO functionality.</p><p>We have also addressed the use of helper functions in the documentation. In brief, Streamlines and Neuron actors are most often used to visualize multiple instances of these classes at once, and we therefore provide helper functions that facilitate the creation of multiple Streamlines and Neuron instances. As we show in the online examples, however, these classes can also be used without the helper functions, just like the other actor classes.</p><disp-quote content-type="editor-comment"><p>6) Though the purpose of this tool is not to develop a registration algorithm for anatomical reference atlases, or to perform data analysis, we view these as the most difficult and necessary steps in this process. 3D rendered data visualizations are informative, but not meaningful without accurate registration to begin with and without quantitative analysis to back it up. As they point out in the introduction, other, similar tools (natverse, MagellanMapper) have both visualization and analysis capabilities.</p></disp-quote><p>We agree that 3D data registration is indeed a technically demanding step that is necessary before data can be visualised in <italic>brainrender</italic>. As we mention in our reply to point 1, the revised manuscript now discusses this explicitly and we have added additional online documentation on this topic: https://docs.brainrender.info/usage/using-your-data/registeringdata.</p></body></sub-article></article>