<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:podcast="https://podcastindex.org/namespace/1.0"
    xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:wfw="http://wellformedweb.org/CommentAPI/"
    xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:atom="http://www.w3.org/2005/Atom"
    xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:slash="http://purl.org/rss/1.0/modules/slash/"
    xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:spotify="http://www.spotify.com/ns/rss">
    <channel>
        <title>Brain Inspired</title>
        <generator>Castos</generator>
        <atom:link href="https://feeds.castos.com/enkx" rel="self" type="application/rss+xml" />
        <link>https://braininspired.co</link>
        <description>Neuroscience and artificial intelligence work better together. Brain inspired is a celebration and exploration of the ideas driving our progress to understand intelligence. I interview experts about their work at the interface of neuroscience, artificial intelligence, cognitive science, philosophy, psychology, and more: the symbiosis of these overlapping fields, how they inform each other, where they differ, what the past brought us, and what the future brings. Topics include computational neuroscience, supervised machine learning, unsupervised learning, reinforcement learning, deep learning, convolutional and recurrent neural networks, decision-making science, AI agents, backpropagation, credit assignment, neuroengineering, neuromorphics, emergence, philosophy of mind, consciousness, general AI, spiking neural networks, data science, and a lot more. The podcast is not produced for a general audience. Instead, it aims to educate, challenge, inspire, and hopefully entertain those interested in learning more about neuroscience and AI.</description>
        <lastBuildDate>Wed, 22 Apr 2026 04:00:00 +0000</lastBuildDate>
        <language>en-US</language>
        <copyright>© 2019 Brain-Inspired</copyright>
        
        <spotify:limit recentCount="200" />
        
        <spotify:countryOfOrigin>
              
        </spotify:countryOfOrigin>
                    <image>
                <url>https://episodes.castos.com/braininspired/d5406a0d-dd82-4385-9c4e-cdc1347ce7ef-currentLogoLarge.png</url>
                <title>Brain Inspired</title>
                <link>https://braininspired.co</link>
            </image>
                <itunes:subtitle>Neuroscience and artificial intelligence work better together. Brain inspired is a celebration and exploration of the ideas driving our progress to understand intelligence. I interview experts about their work at the interface of neuroscience, artificial intelligence, cognitive science, philosophy, psychology, and more: the symbiosis of these overlapping fields, how they inform each other, where they differ, what the past brought us, and what the future brings. Topics include computational neuroscience, supervised machine learning, unsupervised learning, reinforcement learning, deep learning, convolutional and recurrent neural networks, decision-making science, AI agents, backpropagation, credit assignment, neuroengineering, neuromorphics, emergence, philosophy of mind, consciousness, general AI, spiking neural networks, data science, and a lot more. The podcast is not produced for a general audience. Instead, it aims to educate, challenge, inspire, and hopefully entertain those interested in learning more about neuroscience and AI.</itunes:subtitle>
        <itunes:author>Paul Middlebrooks</itunes:author>
        <itunes:type>episodic</itunes:type>
        <itunes:summary>Neuroscience and artificial intelligence work better together. Brain inspired is a celebration and exploration of the ideas driving our progress to understand intelligence. I interview experts about their work at the interface of neuroscience, artificial intelligence, cognitive science, philosophy, psychology, and more: the symbiosis of these overlapping fields, how they inform each other, where they differ, what the past brought us, and what the future brings. Topics include computational neuroscience, supervised machine learning, unsupervised learning, reinforcement learning, deep learning, convolutional and recurrent neural networks, decision-making science, AI agents, backpropagation, credit assignment, neuroengineering, neuromorphics, emergence, philosophy of mind, consciousness, general AI, spiking neural networks, data science, and a lot more. The podcast is not produced for a general audience. Instead, it aims to educate, challenge, inspire, and hopefully entertain those interested in learning more about neuroscience and AI.</itunes:summary>
        <itunes:owner>
            <itunes:name>Paul Middlebrooks</itunes:name>
            <itunes:email>paul@braininspired.co</itunes:email>
        </itunes:owner>
        <itunes:explicit>false</itunes:explicit>
                    <itunes:image href="https://episodes.castos.com/braininspired/d5406a0d-dd82-4385-9c4e-cdc1347ce7ef-currentLogoLarge.png"></itunes:image>
        
                                    <itunes:category text="Science" />
                                                <itunes:category text="Technology" />
                                                <itunes:category text="Education" />
                    
                    <itunes:new-feed-url>https://feeds.castos.com/enkx</itunes:new-feed-url>
                
        
        <podcast:locked owner="paul@braininspired.co">yes</podcast:locked>
                    <podcast:funding url="https://www.patreon.com/braininspired">Patreon</podcast:funding>
                            <podcast:value type="lightning" method="keysend" suggested="0.00000020000">
                <podcast:valueRecipient name="podcaster" address="3L2C1JDB7oSQQgMHpYyKZMt9nDyzjhbgiL" split="100"
                    type="node"
                                         />
            </podcast:value>
                            <item>
                <title>
                    <![CDATA[BI 236 Liset de la Prida: Neurons, Ripples, and Manifolds]]>
                </title>
                <pubDate>Wed, 22 Apr 2026 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://permalink.castos.com/podcast/330/episode/2428594</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-236-liset-de-la-prida-neurons-ripples-and-manifolds</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story:</p>



<p><a href="https://www.thetransmitter.org/defining-cell-types/from-genes-to-dynamics-examining-brain-cell-types-in-action-may-reveal-the-logic-of-brain-function/">From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Liset de la Prida is director of the Centro de Neurociencias Cajal in Madrid, Spain, where she runs the Laboratory of Neural Circuits. Today we discuss two main topics.</p>



<p>What drew me to invite Liset was her work on neural manifolds, which we've talked about a lot recently on this podcast. She studies how specific subtypes of neurons affect and control neural manifolds. More on that in a second, because what drew her to study manifolds was her work on what are known as sharp wave ripples in the hippocampus. Sharp wave ripples are generally quick bursts of oscillatory activity as found in local field potential recordings that accompany little bursty sequences of action potentials fired off by sets of neurons. Those ripples have been associated with a quick replaying of some experience an organism has had, with the thinking that by replaying those sequences of neural activity associated with an event, it's helping to consolidate the memory for that event in the cortex. Like everything else, the story isn't so simple, and we talk about some of the findings that have added to the complexity of understanding what sharp wave ripples are doing, and the varieties of sharp wave ripples.</p>



<p>That varieties part is related to the second main thing we discuss, which is the varieties of neuron subtypes and their roles in shaping the manifolds we've discussed a lot recently. As a reminder, manifolds are dynamic structures along which populations of neural activity unfold over time, and they have proved to be one effective way of making sense of how large populations of neurons coordinate their activity to do useful things for our cognition. Liset is interested in the relation between sharp wave ripples and manifolds, and in how specific subtypes of neurons affect manifolds and cognition in general.</p>



<ul class="wp-block-list">
<li><a href="https://hippo-circuitlab.es/">Neural Circuits Lab</a></li>



<li><a href="https://bsky.app/profile/lmprida.bsky.social">@lmprida.bsky.social</a>; <a href="https://x.com/LMPrida">@LMPrida</a> </li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://almuzaralibros.com/fichalibro.php?libro=11892&amp;edi=5">Brain, space and time: The neuroscience of how we navigate reality, memory, or the future</a></li>
</ul>
</li>



<li>Related
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/defining-cell-types/from-genes-to-dynamics-examining-brain-cell-types-in-action-may-reveal-the-logic-of-brain-function/">From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function</a></li>



<li><a href="https://www.cell.com/neuron/abstract/S0896-6273(25)00048-0?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS0896627325000480%3Fshowall%3Dtrue">Cell-type-specific manifold analysis discloses independent geometric transformations in the hippocampal spatial code</a></li>

<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438823001253">From cell types to population dynamics: Making hippocampal manifolds physiologically interpretable</a></li>
</ul>
</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this story:



From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Liset de la Prida is director of the Centro de Neurociencias Cajal in Madrid, Spain, where she runs the Laboratory of Neural Circuits. Today we discuss two main topics.



What drew me to invite Liset was her work on neural manifolds, which we've talked about a lot recently on this podcast. She studies how specific subtypes of neurons affect and control neural manifolds. More on that in a second, because what drew her to study manifolds was her work on what are known as sharp wave ripples in the hippocampus. Sharp wave ripples are generally quick bursts of oscillatory activity as found in local field potential recordings that accompany little bursty sequences of action potentials fired off by sets of neurons. Those ripples have been associated with a quick replaying of some experience an organism has had, with the thinking that by replaying those sequences of neural activity associated with an event, it's helping to consolidate the memory for that event in the cortex. Like everything else, the story isn't so simple, and we talk about some of the findings that have added to the complexity of understanding what sharp wave ripples are doing, and the varieties of sharp wave ripples.



That varieties part is related to the second main thing we discuss, which is the varieties of neuron subtypes and their roles in shaping the manifolds we've discussed a lot recently. As a reminder, manifolds are dynamic structures along which populations of neural activity unfold over time, and they have proved to be one effective way of making sense of how large populations of neurons coordinate their activity to do useful things for our cognition. Liset is interested in the relation between sharp wave ripples and manifolds, and in how specific subtypes of neurons affect manifolds and cognition in general.




Neural Circuits Lab



@lmprida.bsky.social; @LMPrida 



Book:

Brain, space and time: The neuroscience of how we navigate reality, memory, or the future





Related

From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function



]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 236 Liset de la Prida: Neurons, Ripples, and Manifolds]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story:</p>



<p><a href="https://www.thetransmitter.org/defining-cell-types/from-genes-to-dynamics-examining-brain-cell-types-in-action-may-reveal-the-logic-of-brain-function/">From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Liset de la Prida is director of the Centro de Neurociencias Cajal in Madrid, Spain, where she runs the Laboratory of Neural Circuits. Today we discuss two main topics.</p>



<p>What drew me to invite Liset was her work on neural manifolds, which we've talked about a lot recently on this podcast. She studies how specific subtypes of neurons affect and control neural manifolds. More on that in a second, because what drew her to study manifolds was her work on what are known as sharp wave ripples in the hippocampus. Sharp wave ripples are generally quick bursts of oscillatory activity as found in local field potential recordings that accompany little bursty sequences of action potentials fired off by sets of neurons. Those ripples have been associated with a quick replaying of some experience an organism has had, with the thinking that by replaying those sequences of neural activity associated with an event, it's helping to consolidate the memory for that event in the cortex. Like everything else, the story isn't so simple, and we talk about some of the findings that have added to the complexity of understanding what sharp wave ripples are doing, and the varieties of sharp wave ripples.</p>



<p>That varieties part is related to the second main thing we discuss, which is the varieties of neuron subtypes and their roles in shaping the manifolds we've discussed a lot recently. As a reminder, manifolds are dynamic structures along which populations of neural activity unfold over time, and they have proved to be one effective way of making sense of how large populations of neurons coordinate their activity to do useful things for our cognition. Liset is interested in the relation between sharp wave ripples and manifolds, and in how specific subtypes of neurons affect manifolds and cognition in general.</p>



<ul class="wp-block-list">
<li><a href="https://hippo-circuitlab.es/">Neural Circuits Lab</a></li>



<li><a href="https://bsky.app/profile/lmprida.bsky.social">@lmprida.bsky.social</a>; <a href="https://x.com/LMPrida">@LMPrida</a> </li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://almuzaralibros.com/fichalibro.php?libro=11892&amp;edi=5">Brain, space and time: The neuroscience of how we navigate reality, memory, or the future</a></li>
</ul>
</li>



<li>Related
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/defining-cell-types/from-genes-to-dynamics-examining-brain-cell-types-in-action-may-reveal-the-logic-of-brain-function/">From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function</a></li>



<li><a href="https://www.cell.com/neuron/abstract/S0896-6273(25)00048-0?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS0896627325000480%3Fshowall%3Dtrue">Cell-type-specific manifold analysis discloses independent geometric transformations in the hippocampal spatial code</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438823001253">From cell types to population dynamics: Making hippocampal manifolds physiologically interpretable</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:29 - Hippocampus
9:31 - Sharp wave ripples
27:30 - Oscillations and epiphenomena
33:37 - Sharp wave ripples to manifolds
43:54 - Manifolds and single neuron types
49:45 - Hippocampus and granularity of cell types
59:23 - Explanation across levels
1:19:38 - Manifolds and higher cognition
1:29:46 - Brain Space and Time</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2428594/c1e-02nnb7727dhod755-pkn4mwx2u5dz-lirykf.mp3" length="100862827"
                        type="audio/mpeg" />
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this story:



From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Liset de la Prida is director of the Centro de Neurociencias Cajal in Madrid, Spain, where she runs the Laboratory of Neural Circuits. Today we discuss two main topics.



What drew me to invite Liset was her work on neural manifolds, which we've talked about a lot recently on this podcast. She studies how specific subtypes of neurons affect and control neural manifolds. More on that in a second, because what drew her to study manifolds was her work on what are known as sharp wave ripples in the hippocampus. Sharp wave ripples are generally quick bursts of oscillatory activity as found in local field potential recordings that accompany little bursty sequences of action potentials fired off by sets of neurons. Those ripples have been associated with a quick replaying of some experience an organism has had, with the thinking that by replaying those sequences of neural activity associated with an event, it's helping to consolidate the memory for that event in the cortex. Like everything else, the story isn't so simple, and we talk about some of the findings that have added to the complexity of understanding what sharp wave ripples are doing, and the varieties of sharp wave ripples.



That varieties part is related to the second main thing we discuss, which is the varieties of neuron subtypes and their roles in shaping the manifolds we've discussed a lot recently. As a reminder, manifolds are dynamic structures along which populations of neural activity unfold over time, and they have proved to be one effective way of making sense of how large populations of neurons coordinate their activity to do useful things for our cognition. Liset is interested in the relation between sharp wave ripples and manifolds, and in how specific subtypes of neurons affect manifolds and cognition in general.




Neural Circuits Lab



@lmprida.bsky.social; @LMPrida 



Book:

Brain, space and time: The neuroscience of how we navigate reality, memory, or the future





Related

From genes to dynamics: Examining brain cell types in action may reveal the logic of brain function



]]>
                </itunes:summary>
                                                                            <itunes:duration>01:44:02</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 235 Romain Brette: The Brain, in Theory]]>
                </title>
                <pubDate>Wed, 08 Apr 2026 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://permalink.castos.com/podcast/330/episode/2415993</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-235-romain-brette-the-brain-in-theory</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>Brains encode information in representations that perform computations to make predictions, right? No, no, no, and no. That's Romain Brette's response to those ill-conceived notions that neuroscience relies on to try to explain how cognition works. He uses more words to do that in his new book, <a href="https://amzn.to/3ZBHyBY" target="_blank" rel="noreferrer noopener">The Brain, in Theory</a>, which we discuss today. In the book Romain breaks down how many of the common metaphors we use don’t withstand scrutiny, and he offers alternative approaches more in line with what we know about how biological entities work. Along those lines, we discuss his ongoing work understanding the cognition of a single celled organism, the paramecium, and what his views might mean for artificial intelligence. This is a long episode, but there's a lot more to be explored in the book, so I recommend you read it. If you're a patreon supporter, I coaxed Romain back on for another 45 minutes to go deeper on his thoughts about how anticipation is the core of cognition, how predictive processing accounts like active inference miss the mark, and a few other topics.</p>



<ul class="wp-block-list">
<li><a href="http://romainbrette.fr/">Romain's website</a>. </li>



<li><a href="https://amzn.to/3ZBHyBY">The Brain, in Theory</a>.</li>
</ul>



<p>0:00 - Intro
4:01 - The Brain, In Theory
7:10 - Influences
13:11 - Process metaphysics
18:39 - Observer vs system perspective
21:24 - Information in the brain?
22:56 - Why this book?
29:52 - Computations in the brain
52:14 - Behavior is not a computation
1:07:20 - Paramecium cognition
1:22:02 - How should neuroscientists proceed?
1:29:09 - Cognition as collective behavior of autonomous cells
1:36:47 - Constraints, causes, and laws
1:52:36 - Hopes for the book to influence the field
1:55:04 - Thoughts about AI
2:02:13 - Computation and goals
2:08:17 - Anticipation vs prediction</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





Brains encode information in representations that perform computations to make predictions, right? No, no, no, and no. That's Romain Brette's response to those ill-conceived notions that neuroscience relies on to try to explain how cognition works. He uses more words to do that in his new book, The Brain, in Theory, which we discuss today. In the book Romain breaks down how many of the common metaphors we use don’t withstand scrutiny, and he offers alternative approaches more in line with what we know about how biological entities work. Along those lines, we discuss his ongoing work understanding the cognition of a single celled organism, the paramecium, and what his views might mean for artificial intelligence. This is a long episode, but there's a lot more to be explored in the book, so I recommend you read it. If you're a patreon supporter, I coaxed Romain back on for another 45 minutes to go deeper on his thoughts about how anticipation is the core of cognition, how predictive processing accounts like active inference miss the mark, and a few other topics.




Romain's website. 



The Brain, in Theory.




0:00 - Intro
4:01 - The Brain, In Theory
7:10 - Influences
13:11 - Process metaphysics
18:39 - Observer vs system perspective
21:24 - Information in the brain?
22:56 - Why this book?
29:52 - Computations in the brain
52:14 - Behavior is not a computation
1:07:20 - Paramecium cognition
1:22:02 - How should neuroscientists proceed?
1:29:09 - Cognition as collective behavior of autonomous cells
1:36:47 - Constraints, causes, and laws
1:52:36 - Hopes for the book to influence the field
1:55:04 - Thoughts about AI
2:02:13 - Computation and goals
2:08:17 - Anticipation vs prediction]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 235 Romain Brette: The Brain, in Theory]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>Brains encode information in representations that perform computations to make predictions, right? No, no, no, and no. That's Romain Brette's response to those ill-conceived notions that neuroscience relies on to try to explain how cognition works. He uses more words to do that in his new book, <a href="https://amzn.to/3ZBHyBY" target="_blank" rel="noreferrer noopener">The Brain, in Theory</a>, which we discuss today. In the book Romain breaks down how many of the common metaphors we use don’t withstand scrutiny, and he offers alternative approaches more in line with what we know about how biological entities work. Along those lines, we discuss his ongoing work understanding the cognition of a single celled organism, the paramecium, and what his views might mean for artificial intelligence. This is a long episode, but there's a lot more to be explored in the book, so I recommend you read it. If you're a patreon supporter, I coaxed Romain back on for another 45 minutes to go deeper on his thoughts about how anticipation is the core of cognition, how predictive processing accounts like active inference miss the mark, and a few other topics.</p>



<ul class="wp-block-list">
<li><a href="http://romainbrette.fr/">Romain's website</a>. </li>



<li><a href="https://amzn.to/3ZBHyBY">The Brain, in Theory</a>.</li>
</ul>



<p>0:00 - Intro
4:01 - The Brain, In Theory
7:10 - Influences
13:11 - Process metaphysics
18:39 - Observer vs system perspective
21:24 - Information in the brain?
22:56 - Why this book?
29:52 - Computations in the brain
52:14 - Behavior is not a computation
1:07:20 - Paramecium cognition
1:22:02 - How should neuroscientists proceed?
1:29:09 - Cognition as collective behavior of autonomous cells
1:36:47 - Constraints, causes, and laws
1:52:36 - Hopes for the book to influence the field
1:55:04 - Thoughts about AI
2:02:13 - Computation and goals
2:08:17 - Anticipation vs prediction</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2415993/c1e-z9xxc376jdu201xv-dm1kowqjaor3-0jpgmb.mp3" length="127231602"
                        type="audio/mpeg" />
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





Brains encode information in representations that perform computations to make predictions, right? No, no, no, and no. That's Romain Brette's response to those ill-conceived notions that neuroscience relies on to try to explain how cognition works. He uses more words to do that in his new book, The Brain, in Theory, which we discuss today. In the book Romain breaks down how many of the common metaphors we use don’t withstand scrutiny, and he offers alternative approaches more in line with what we know about how biological entities work. Along those lines, we discuss his ongoing work understanding the cognition of a single celled organism, the paramecium, and what his views might mean for artificial intelligence. This is a long episode, but there's a lot more to be explored in the book, so I recommend you read it. If you're a patreon supporter, I coaxed Romain back on for another 45 minutes to go deeper on his thoughts about how anticipation is the core of cognition, how predictive processing accounts like active inference miss the mark, and a few other topics.




Romain's website. 



The Brain, in Theory.




0:00 - Intro
4:01 - The Brain, In Theory
7:10 - Influences
13:11 - Process metaphysics
18:39 - Observer vs system perspective
21:24 - Information in the brain?
22:56 - Why this book?
29:52 - Computations in the brain
52:14 - Behavior is not a computation
1:07:20 - Paramecium cognition
1:22:02 - How should neuroscientists proceed?
1:29:09 - Cognition as collective behavior of autonomous cells
1:36:47 - Constraints, causes, and laws
1:52:36 - Hopes for the book to influence the field
1:55:04 - Thoughts about AI
2:02:13 - Computation and goals
2:08:17 - Anticipation vs prediction]]>
                </itunes:summary>
                                                                            <itunes:duration>02:11:00</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 234 Juan Gallego: The Neural Manifold Manifesto]]>
                </title>
                <pubDate>Wed, 25 Mar 2026 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2404868</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-234-juan-gallego-the-neural-manifold-manifesto</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story: <a href="https://www.thetransmitter.org/neural-manifolds-latest-buzzword-or-pathway-to-understand-the-brain/">Neural manifolds: Latest buzzword or pathway to understand the brain?</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Juan Gallego runs the <a href="https://www.fchampalimaud.org/research/groups/gallego-lab">Neocybernetics Lab</a> at the <a href="https://www.fchampalimaud.org/champalimaud-research">Champalimaud Centre for the Unknown</a> in Lisbon, Portugal, affiliated with the neuroscience of disease and neuroscience programs, and the centre for restorative neurotechnology.</p>



<p>Juan has worked a lot on neural manifolds - the mathematical objects neuroscience is using more and more to describe how big populations of neurons coordinate their activity to do useful things. In fact, he recently gave a short talk that he titled The Manifold Manifesto, because he was asked to be provocative. And he was provocative, suggesting that manifolds are real - as real as chairs and tables are, that they have causal power, and they might be a target of evolution. Of course he talked about his own and others work to support those claims. So today we discuss many of those themes, through the lens of his own and others work, and we talk about what keeps him up at night about the possible limits of using manifolds to connect brain activity with behavior and mental phenomena.</p>



<p>He's not just a manifold person, though. Juan is more broadly interested in motor control and how brains do it.</p>



<p>We also discuss his work in patients with spinal cord injuries, who don't have enough nerve connections to their muscles to actually move, but have enough nerve connections that some signal gets through. Juan and his colleagues can detect that little bit getting through, and use it to infer what behaviors the patients intend to do, and they can use that information to control actions in a computer simulation. The hope is that this will translate to controlling prosthetics to give spinal cord injury patients their mobility again.</p>



<ul class="wp-block-list">
<li><a href="https://www.fchampalimaud.org/research/groups/gallego-lab">Neocybernetics Lab</a>.</li>



<li><a href="https://bsky.app/profile/juangallego.bsky.social">@juangallego.bsky.social</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.nature.com/articles/s41593-025-02031-z">A neural manifold view of the brain</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-024-54738-5">A neural implementation model of feedback-based motor learning</a>.</li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(24)00922-X?uuid=uuid%3Ab00b7eef-9b3b-4791-85f9-6fd6ce0cabc7">Conjoint specification of action by neocortex and striatum</a>.</li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438824000059">Integrating across behaviors and timescales to understand the neural control of movement</a>.</li>



<li><a href="https://www.biorxiv.org/content/biorxiv/early/2026/03/06/2026.03.06.709637.full.pdf">Evolutionarily conserved neural dynamics across mice, monkeys, and humans</a>.</li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this story: Neural manifolds: Latest buzzword or pathway to understand the brain?



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Juan Gallego runs the Neocybernetics Lab at the Champalimaud Centre for the Unknown in Lisbon, Portugal, affiliated with the neuroscience of disease and neuroscience programs, and the centre for restorative neurotechnology.



Juan has worked a lot on neural manifolds - the mathematical objects neuroscience is using more and more to describe how big populations of neurons coordinate their activity to do useful things. In fact, he recently gave a short talk that he titled The Manifold Manifesto, because he was asked to be provocative. And he was provocative, suggesting that manifolds are real - as real as chairs and tables are, that they have causal power, and they might be a target of evolution. Of course he talked about his own and others work to support those claims. So today we discuss many of those themes, through the lens of his own and others work, and we talk about what keeps him up at night about the possible limits of using manifolds to connect brain activity with behavior and mental phenomena.



He's not just a manifold person, though. Juan is more broadly interested in motor control and how brains do it.



We also discuss his work in patients with spinal cord injuries, who don't have enough nerve connections to their muscles to actually move, but have enough nerve connections that some signal gets through. Juan and his colleagues can detect that little bit getting through, and use it to infer what behaviors the patients intend to do, and they can use that information to control actions in a computer simulation. The hope is that this will translate to controlling prosthetics to give spinal cord injury patients their mobility again.




Neocybernetics Lab.



@juangallego.bsky.social



Related papers

A neural manifold view of the brain.



A neural implementation model of feedback-based motor learning.



Conjoint specification of action by neocortex and striatum.



Integrating across behaviors and timescales to understand the neural control of movement.



]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 234 Juan Gallego: The Neural Manifold Manifesto]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story: <a href="https://www.thetransmitter.org/neural-manifolds-latest-buzzword-or-pathway-to-understand-the-brain/">Neural manifolds: Latest buzzword or pathway to understand the brain?</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Juan Gallego runs the <a href="https://www.fchampalimaud.org/research/groups/gallego-lab">Neocybernetics Lab</a> at the <a href="https://www.fchampalimaud.org/champalimaud-research">Champalimaud Centre for the Unknown</a> in Lisbon, Portugal, affiliated with the neuroscience of disease and neuroscience programs, and the centre for restorative neurotechnology.</p>



<p>Juan has worked a lot on neural manifolds - the mathematical objects neuroscience is using more and more to describe how big populations of neurons coordinate their activity to do useful things. In fact, he recently gave a short talk that he titled The Manifold Manifesto, because he was asked to be provocative. And he was provocative, suggesting that manifolds are real - as real as chairs and tables are, that they have causal power, and they might be a target of evolution. Of course he talked about his own and others work to support those claims. So today we discuss many of those themes, through the lens of his own and others work, and we talk about what keeps him up at night about the possible limits of using manifolds to connect brain activity with behavior and mental phenomena.</p>



<p>He's not just a manifold person, though. Juan is more broadly interested in motor control and how brains do it.</p>



<p>We also discuss his work in patients with spinal cord injuries, who don't have enough nerve connections to their muscles to actually move, but have enough nerve connections that some signal gets through. Juan and his colleagues can detect that little bit getting through, and use it to infer what behaviors the patients intend to do, and they can use that information to control actions in a computer simulation. The hope is that this will translate to controlling prosthetics to give spinal cord injury patients their mobility again.</p>



<ul class="wp-block-list">
<li><a href="https://www.fchampalimaud.org/research/groups/gallego-lab">Neocybernetics Lab</a>.</li>



<li><a href="https://bsky.app/profile/juangallego.bsky.social">@juangallego.bsky.social</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.nature.com/articles/s41593-025-02031-z">A neural manifold view of the brain</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-024-54738-5">A neural implementation model of feedback-based motor learning</a>.</li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(24)00922-X?uuid=uuid%3Ab00b7eef-9b3b-4791-85f9-6fd6ce0cabc7">Conjoint specification of action by neocortex and striatum</a>.</li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438824000059">Integrating across behaviors and timescales to understand the neural control of movement</a>.</li>



<li><a href="https://www.biorxiv.org/content/biorxiv/early/2026/03/06/2026.03.06.709637.full.pdf">Evolutionarily conserved neural dynamics across mice, monkeys, and humans</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2026/03/BI-234-transcript-juan-gallego.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
4:37 - Manifolds
14:30 - Strengths and weaknesses
24:32 - Conserved manifolds across animals and species
34:31 - Causality and manifolds
47:29 - Constraints and causes
51:05 - What to measure
58:55 - Complexity and manifolds
1:10:29 - Juan's background
1:14:08 - Prosthetics for spinal cord injuries
1:41:06 - Integrating across behaviors and timescales
1:46:56 - Conjoint specification of action by neocortex and striatum.</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2404868/c1e-d5wwao607qf590qd-v6wk40g7uj3g-sxjyxz.mp3" length="117960832"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this story: Neural manifolds: Latest buzzword or pathway to understand the brain?



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Juan Gallego runs the Neocybernetics Lab at the Champalimaud Centre for the Unknown in Lisbon, Portugal, affiliated with the neuroscience of disease and neuroscience programs, and the centre for restorative neurotechnology.



Juan has worked a lot on neural manifolds - the mathematical objects neuroscience is using more and more to describe how big populations of neurons coordinate their activity to do useful things. In fact, he recently gave a short talk that he titled The Manifold Manifesto, because he was asked to be provocative. And he was provocative, suggesting that manifolds are real - as real as chairs and tables are, that they have causal power, and they might be a target of evolution. Of course he talked about his own and others work to support those claims. So today we discuss many of those themes, through the lens of his own and others work, and we talk about what keeps him up at night about the possible limits of using manifolds to connect brain activity with behavior and mental phenomena.



He's not just a manifold person, though. Juan is more broadly interested in motor control and how brains do it.



We also discuss his work in patients with spinal cord injuries, who don't have enough nerve connections to their muscles to actually move, but have enough nerve connections that some signal gets through. Juan and his colleagues can detect that little bit getting through, and use it to infer what behaviors the patients intend to do, and they can use that information to control actions in a computer simulation. The hope is that this will translate to controlling prosthetics to give spinal cord injury patients their mobility again.




Neocybernetics Lab.



@juangallego.bsky.social



Related papers

A neural manifold view of the brain.



A neural implementation model of feedback-based motor learning.



Conjoint specification of action by neocortex and striatum.



Integrating across behaviors and timescales to understand the neural control of movement.



]]>
                </itunes:summary>
                                                                            <itunes:duration>02:01:31</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 233 Tom Griffiths: The Laws of Thought]]>
                </title>
                <pubDate>Wed, 11 Mar 2026 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2389668</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-233-tom-griffiths-the-laws-of-thought</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>Tom Griffiths directs both the <a href="https://cocosci.princeton.edu/">Computational Cognitive Science Lab</a> and the <a href="https://ai.princeton.edu/ai-lab">Princeton Laboratory for Artificial Intelligence</a> at Princeton University. He's been on brain inspired before to talk about his previous book <a href="https://amzn.to/4s1sRnR">Algorithms to Live By: The Computer Science of Human Decisions</a>, which he co-wrote with Brian Christian. Today he's here to talk about his new book, <a href="https://amzn.to/3O9Dx5j">The Laws of Thought: The Quest for a Mathematical Theory of the Mind</a>. In this book, Tom explains how the three pillars of logic, neural networks, and probability theory complement each other to explain cognition, arguing we are on the doorstep to settling what mathematical principles - the so-called "laws of thought" - underlie our cognition. So we discuss a little bit about a lot of things, including the concepts themselves, the people who have generated and worked on those concepts. I should also mention, Tom recorded a bunch of his interviews with people he writes about, and he's edited and polished those into a podcast called the Cognition Project, which I have enjoyed after reading the book, and I think you'd enjoy it either before or after you read the book.</p>



<ul class="wp-block-list">
<li><a href="https://cocosci.princeton.edu/">Computational Cognitive Science Lab</a></li>



<li><a href="https://ai.princeton.edu/ai-lab">Princeton Laboratory for Artificial Intelligence</a></li>



<li>Social: <a href="https://x.com/cocosci_lab">@cocosci_lab</a>; <a href="https://bsky.app/profile/did:plc:gopsyxl7h53zecg7o3h5wbot">@cocoscilab.bsky.social</a></li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/3O9Dx5j">The Laws of Thought: The Quest for a Mathematical Theory of the Mind</a>.</li>
</ul>
</li>



<li><a href="https://open.spotify.com/show/7rhwBGhEQCtO9cBguazFsq">Podcast: The Cognition Project</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2026/03/BI-233-transcript-tom-griffiths-final.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:20 - Tom's approach
7:19 - 3 pillars of the laws of thought
28:24 - Logic and formal systems strip away meaning
39:04 - Nature of thought
50:35 - Kahneman and Tversky
1:05:12 - Enabling constraints and inductive bias
1:12:51 - Hidden layers, probability, and hidden markov models
1:20:47 - Conscious vs nonconscious
1:23:43 - Feelings
1:31:26 - Personal</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





Tom Griffiths directs both the Computational Cognitive Science Lab and the Princeton Laboratory for Artificial Intelligence at Princeton University. He's been on brain inspired before to talk about his previous book Algorithms to Live By: The Computer Science of Human Decisions, which he co-wrote with Brian Christian. Today he's here to talk about his new book, The Laws of Thought: The Quest for a Mathematical Theory of the Mind. In this book, Tom explains how the three pillars of logic, neural networks, and probability theory complement each other to explain cognition, arguing we are on the doorstep to settling what mathematical principles - the so-called "laws of thought" - underlie our cognition. So we discuss a little bit about a lot of things, including the concepts themselves, the people who have generated and worked on those concepts. I should also mention, Tom recorded a bunch of his interviews with people he writes about, and he's edited and polished those into a podcast called the Cognition Project, which I have enjoyed after reading the book, and I think you'd enjoy it either before or after you read the book.




Computational Cognitive Science Lab



Princeton Laboratory for Artificial Intelligence



Social: @cocosci_lab; @cocoscilab.bsky.social



Book:

The Laws of Thought: The Quest for a Mathematical Theory of the Mind.





Podcast: The Cognition Project




Read the transcript.



0:00 - Intro
3:20 - Tom's approach
7:19 - 3 pillars of the laws of thought
28:24 - Logic and formal systems strip away meaning
39:04 - Nature of thought
50:35 - Kahneman and Tversky
1:05:12 - Enabling constraints and inductive bias
1:12:51 - Hidden layers, probability, and hidden markov models
1:20:47 - Conscious vs nonconscious
1:23:43 - Feelings
1:31:26 - Personal]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 233 Tom Griffiths: The Laws of Thought]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>Tom Griffiths directs both the <a href="https://cocosci.princeton.edu/">Computational Cognitive Science Lab</a> and the <a href="https://ai.princeton.edu/ai-lab">Princeton Laboratory for Artificial Intelligence</a> at Princeton University. He's been on brain inspired before to talk about his previous book <a href="https://amzn.to/4s1sRnR">Algorithms to Live By: The Computer Science of Human Decisions</a>, which he co-wrote with Brian Christian. Today he's here to talk about his new book, <a href="https://amzn.to/3O9Dx5j">The Laws of Thought: The Quest for a Mathematical Theory of the Mind</a>. In this book, Tom explains how the three pillars of logic, neural networks, and probability theory complement each other to explain cognition, arguing we are on the doorstep to settling what mathematical principles - the so-called "laws of thought" - underlie our cognition. So we discuss a little bit about a lot of things, including the concepts themselves, the people who have generated and worked on those concepts. I should also mention, Tom recorded a bunch of his interviews with people he writes about, and he's edited and polished those into a podcast called the Cognition Project, which I have enjoyed after reading the book, and I think you'd enjoy it either before or after you read the book.</p>



<ul class="wp-block-list">
<li><a href="https://cocosci.princeton.edu/">Computational Cognitive Science Lab</a></li>



<li><a href="https://ai.princeton.edu/ai-lab">Princeton Laboratory for Artificial Intelligence</a></li>



<li>Social: <a href="https://x.com/cocosci_lab">@cocosci_lab</a>; <a href="https://bsky.app/profile/did:plc:gopsyxl7h53zecg7o3h5wbot">@cocoscilab.bsky.social</a></li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/3O9Dx5j">The Laws of Thought: The Quest for a Mathematical Theory of the Mind</a>.</li>
</ul>
</li>



<li><a href="https://open.spotify.com/show/7rhwBGhEQCtO9cBguazFsq">Podcast: The Cognition Project</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2026/03/BI-233-transcript-tom-griffiths-final.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:20 - Tom's approach
7:19 - 3 pillars of the laws of thought
28:24 - Logic and formal systems strip away meaning
39:04 - Nature of thought
50:35 - Kahneman and Tversky
1:05:12 - Enabling constraints and inductive bias
1:12:51 - Hidden layers, probability, and hidden markov models
1:20:47 - Conscious vs nonconscious
1:23:43 - Feelings
1:31:26 - Personal</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2389668/c1e-kd00fd4nkpbzzn4m-z348nwqnbv4r-j3rn6d.mp3" length="97381433"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





Tom Griffiths directs both the Computational Cognitive Science Lab and the Princeton Laboratory for Artificial Intelligence at Princeton University. He's been on brain inspired before to talk about his previous book Algorithms to Live By: The Computer Science of Human Decisions, which he co-wrote with Brian Christian. Today he's here to talk about his new book, The Laws of Thought: The Quest for a Mathematical Theory of the Mind. In this book, Tom explains how the three pillars of logic, neural networks, and probability theory complement each other to explain cognition, arguing we are on the doorstep to settling what mathematical principles - the so-called "laws of thought" - underlie our cognition. So we discuss a little bit about a lot of things, including the concepts themselves, the people who have generated and worked on those concepts. I should also mention, Tom recorded a bunch of his interviews with people he writes about, and he's edited and polished those into a podcast called the Cognition Project, which I have enjoyed after reading the book, and I think you'd enjoy it either before or after you read the book.




Computational Cognitive Science Lab



Princeton Laboratory for Artificial Intelligence



Social: @cocosci_lab; @cocoscilab.bsky.social



Book:

The Laws of Thought: The Quest for a Mathematical Theory of the Mind.





Podcast: The Cognition Project




Read the transcript.



0:00 - Intro
3:20 - Tom's approach
7:19 - 3 pillars of the laws of thought
28:24 - Logic and formal systems strip away meaning
39:04 - Nature of thought
50:35 - Kahneman and Tversky
1:05:12 - Enabling constraints and inductive bias
1:12:51 - Hidden layers, probability, and hidden markov models
1:20:47 - Conscious vs nonconscious
1:23:43 - Feelings
1:31:26 - Personal]]>
                </itunes:summary>
                                                                            <itunes:duration>01:40:13</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 232 How Should Neuroscience Integrate with Ecological Psychology?]]>
                </title>
                <pubDate>Wed, 25 Feb 2026 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2373150</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-232-how-should-neuroscience-integrate-with-ecological-psychology</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>How does brain activity explain your perceptions and your actions? That's what neuroscientists ask. How does the interaction between brain, body, and environment explain your perceptions and actions? That's what ecological psychologists ask… sometimes leaving the brain out of the equation altogether. These different approaches to perception and action come with different terms, concepts, underlying assumptions, and targets of explanations.</p>



<p>So what happens when neuroscientists are inspired by ecological psychology but don't necessarily want to take on, or are ignorant of, the fundamental principles underlying ecological psychology?</p>



<p>This happens all the time, like how AI was "inspired" by the most rudimentary understanding of how brains work, and took terms from neuroscience like neuron, neural network, and so on, as stand-ins for their models. This has in some sense re-defined what people mean by neuron, and neural network, and how they function and how we should think of them.</p>



<p>Modern neuroscience, with better data collecting tools, has taken a turn toward more naturalistic experimental paradigms to study how brains operate in more ecologically valid situations than what has mostly been used in the history of neuroscience - highly controlled tasks and experimental setups that arguably have very little to do with how organisms evolved to interact with the world to do cognitive things.</p>



<p>One problem with this turn is that we neuroscientists don't have ready-made theoretical tools to deal with the less constrained massive amounts of data the new approach affords. This has led some neuroscientists to seek those theoretical concepts elsewhere. One of those places that offers those theoretical tools is ecological psychology, developed by James and Eleanor Gibson in the mid-20th century, and continued since then by many adherents of the concepts introduced by ecological psychology. Those concepts are very specific with regard to how and what to explain regarding perception and action.</p>



<p><a href="https://dewitlab.wordpress.com/">Matthieu de Wit</a> is an associate professor at <a href="https://www.muhlenberg.edu/">Muhlenberg College</a> in Pennsylvania, who runs the ECON Lab, as in Ecological Neuroscience. <a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a> is an associate professor at Indiana University. He's been on before to talk about his book <a href="https://amzn.to/3LbSgrI">The Ecological Brain</a>. And <a href="https://www.um.es/mintlab/index.php/about/people/vicente-raja/">Vicente Raja</a> is a research fellow at University of Murcia in Spain, and he's been on before to talk about ecological psychology and neuroscience.</p>



<p>With their deep expertise in ecological psychology, they are keenly interested in how neuroscience writ large adopts various facets of ecological psychology. Do neuroscientists have it right? Do they need to have it right? Is there something being lost in translation? How should neuroscientists adop...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



How does brain activity explain your perceptions and your actions? That's what neuroscientists ask. How does the interaction between brain, body, and environment explain your perceptions and actions? That's what ecological psychologists ask… sometimes leaving the brain out of the equation altogether. These different approaches to perception and action come with different terms, concepts, underlying assumptions, and targets of explanations.



So what happens when neuroscientists are inspired by ecological psychology but don't necessarily want to take on, or are ignorant of, the fundamental principles underlying ecological psychology?



This happens all the time, like how AI was "inspired" by the most rudimentary understanding of how brains work, and took terms from neuroscience like neuron, neural network, and so on, as stand-ins for their models. This has in some sense re-defined what people mean by neuron, and neural network, and how they function and how we should think of them.



Modern neuroscience, with better data collecting tools, has taken a turn toward more naturalistic experimental paradigms to study how brains operate in more ecologically valid situations than what has mostly been used in the history of neuroscience - highly controlled tasks and experimental setups that arguably have very little to do with how organisms evolved to interact with the world to do cognitive things.



One problem with this turn is that we neuroscientists don't have ready-made theoretical tools to deal with the less constrained massive amounts of data the new approach affords. This has led some neuroscientists to seek those theoretical concepts elsewhere. One of those places that offers those theoretical tools is ecological psychology, developed by James and Eleanor Gibson in the mid-20th century, and continued since then by many adherents of the concepts introduced by ecological psychology. Those concepts are very specific with regard to how and what to explain regarding perception and action.



Matthieu de Wit is an associate professor at Muhlenberg College in Pennsylvania, who runs the ECON Lab, as in Ecological Neuroscience. Luis Favela is an associate professor at Indiana University. He's been on before to talk about his book The Ecological Brain. And Vicente Raja is a research fellow at University of Murcia in Spain, and he's been on before to talk about ecological psychology and neuroscience.



With their deep expertise in ecological psychology, they are keenly interested in how neuroscience writ large adopts various facets of ecological psychology. Do neuroscientists have it right? Do they need to have it right? Is there something being lost in translation? How should neuroscientists adop...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 232 How Should Neuroscience Integrate with Ecological Psychology?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>How does brain activity explain your perceptions and your actions? That's what neuroscientists ask. How does the interaction between brain, body, and environment explain your perceptions and actions? That's what ecological psychologists ask… sometimes leaving the brain out of the equation altogether. These different approaches to perception and action come with different terms, concepts, underlying assumptions, and targets of explanations.</p>



<p>So what happens when neuroscientists are inspired by ecological psychology but don't necessarily want to take on, or are ignorant of, the fundamental principles underlying ecological psychology?</p>



<p>This happens all the time, like how AI was "inspired" by the most rudimentary understanding of how brains work, and took terms from neuroscience like neuron, neural network, and so on, as stand-ins for their models. This has in some sense re-defined what people mean by neuron, and neural network, and how they function and how we should think of them.</p>



<p>Modern neuroscience, with better data collecting tools, has taken a turn toward more naturalistic experimental paradigms to study how brains operate in more ecologically valid situations than what has mostly been used in the history of neuroscience - highly controlled tasks and experimental setups that arguably have very little to do with how organisms evolved to interact with the world to do cognitive things.</p>



<p>One problem with this turn is that we neuroscientists don't have ready-made theoretical tools to deal with the less constrained massive amounts of data the new approach affords. This has led some neuroscientists to seek those theoretical concepts elsewhere. One of those places that offers those theoretical tools is ecological psychology, developed by James and Eleanor Gibson in the mid-20th century, and continued since then by many adherents of the concepts introduced by ecological psychology. Those concepts are very specific with regard to how and what to explain regarding perception and action.</p>



<p><a href="https://dewitlab.wordpress.com/">Matthieu de Wit</a> is an associate professor at <a href="https://www.muhlenberg.edu/">Muhlenberg College</a> in Pennsylvania, who runs the ECON Lab, as in Ecological Neuroscience. <a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a> is an associate professor at Indiana University. He's been on before to talk about his book <a href="https://amzn.to/3LbSgrI">The Ecological Brain</a>. And <a href="https://www.um.es/mintlab/index.php/about/people/vicente-raja/">Vicente Raja</a> is a research fellow at University of Murcia in Spain, and he's been on before to talk about ecological psychology and neuroscience.</p>



<p>With their deep expertise in ecological psychology, they are keenly interested in how neuroscience writ large adopts various facets of ecological psychology. Do neuroscientists have it right? Do they need to have it right? Is there something being lost in translation? How should neuroscientists adopt ecological psychology for an ecological neuroscience? That's what we're discussing today.</p>



<p>More broadly, this is also a story about what it's like doing research that isn't part of the current mainstream approach, in this case doing ecological psychology under the long shadow cast by the computational mechanistic neuro-centric dominant paradigm in neuroscience currently.</p>



<ul class="wp-block-list">
<li>Matthieu <a href="https://dewitlab.wordpress.com/">de Wit lab</a>.
<ul class="wp-block-list">
<li><a href="https://bsky.app/profile/did:plc:adcp5ggarbjp74sxmszapmzd">@dewitmm.bsky.social</a></li>
</ul>
</li>



<li><a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a>.
<ul class="wp-block-list">
<li><a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a></li>
</ul>
</li>



<li>Vicente Raja
<ul class="wp-block-list">
<li><a href="https://bsky.app/profile/diovicen.bsky.social">@diovicen.bsky.social</a></li>



<li><a href="https://www.um.es/mintlab/index.php/about/people/vicente-raja/">MINT Lab</a>.</li>



<li><a href="https://amzn.to/3VVBxOD">Ecological psychology</a> </li>
</ul>
</li>



<li>Previous episodes:<ul><li><a href="https://braininspired.co/?s=favela">BI 223 Vicente Raja: Ecological Psychology Motifs in Neuroscience</a></li><li><a href="https://braininspired.co/podcast/190/">BI 190 Luis Favela: The Ecological Brain</a></li></ul>
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/213/">BI 213 Representations in Minds and Brains</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2026/02/BI-232-transcript-ecological-psychology.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
8:23 - How Louie, Vicente, and Matthieu know each other
11:16 - Past present and future of relation between neuroscience and ecological psychology
17:02 - Why resistance to integrating neuroscience into ecological psychology?
28:26 - What counts as ecological psychology?
33:32 - Affordances properly understood
40:33 - Ecological information
47:58 - Importance of dynamics
48:59 - What's at stake?
58:27 - Environment intervention
1:16:21 - When ecological neuroscience publishes
1:31:25 - Neuroscientists escape hatch
1:38:04 - Is ecological psychology a theory of everything?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2373150/c1e-7kpphv2ng9aw7m05-v6w57dkqbvm0-vzyir6.mp3" length="109864056"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



How does brain activity explain your perceptions and your actions? That's what neuroscientists ask. How does the interaction between brain, body, and environment explain your perceptions and actions? That's what ecological psychologists ask… sometimes leaving the brain out of the equation altogether. These different approaches to perception and action come with different terms, concepts, underlying assumptions, and targets of explanations.



So what happens when neuroscientists are inspired by ecological psychology but don't necessarily want to take on, or are ignorant of, the fundamental principles underlying ecological psychology?



This happens all the time, like how AI was "inspired" by the most rudimentary understanding of how brains work, and took terms from neuroscience like neuron, neural network, and so on, as stand-ins for their models. This has in some sense re-defined what people mean by neuron, and neural network, and how they function and how we should think of them.



Modern neuroscience, with better data collecting tools, has taken a turn toward more naturalistic experimental paradigms to study how brains operate in more ecologically valid situations than what has mostly been used in the history of neuroscience - highly controlled tasks and experimental setups that arguably have very little to do with how organisms evolved to interact with the world to do cognitive things.



One problem with this turn is that we neuroscientists don't have ready-made theoretical tools to deal with the less constrained massive amounts of data the new approach affords. This has led some neuroscientists to seek those theoretical concepts elsewhere. One of those places that offers those theoretical tools is ecological psychology, developed by James and Eleanor Gibson in the mid-20th century, and continued since then by many adherents of the concepts introduced by ecological psychology. Those concepts are very specific with regard to how and what to explain regarding perception and action.



Matthieu de Wit is an associate professor at Muhlenberg College in Pennsylvania, who runs the ECON Lab, as in Ecological Neuroscience. Luis Favela is an associate professor at Indiana University. He's been on before to talk about his book The Ecological Brain. And Vicente Raja is a research fellow at University of Murcia in Spain, and he's been on before to talk about ecological psychology and neuroscience.



With their deep expertise in ecological psychology, they are keenly interested in how neuroscience writ large adopts various facets of ecological psychology. Do neuroscientists have it right? Do they need to have it right? Is there something being lost in translation? How should neuroscientists adop...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:53:10</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 231 Jaan Aru: Conscious AI? Not Even Close!]]>
                </title>
                <pubDate>Wed, 11 Feb 2026 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2358658</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-231-jaan-aru-conscious-ai-not-even-close</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Jaan Aru is a co-principal investigator of the Natural and Artificial Intelligence Lab at the University of Tartu in Estonia, where he is an associate professor. Jaan's name has kept popping up on papers I've read over the last few years, sometimes alongside other guests I've had on the podcast, like <a href="https://braininspired.co/podcast/138/">Matthew Larkum</a> and <a href="https://braininspired.co/podcast/121/">Mac Shine</a>. With those people and others, he has co-authored papers exploring how some of the pesky biological details of brains might be important for our subjective conscious experience, details like dendritic integration, and loops between the cortex and the thalamus. Turns out a recurring theme in his work is to connect lower-level nitty gritty biological details with higher level cognitive functioning. And he has some thoughts about what that might mean for the prospects of consciousness in  artificial systems. And we also touch on his more recent interest in understanding the brain basis of insight and creativity, connecting some of the more mundane kinds of insights during problem solving, for example, with some of the more profound kinds of insights during mystical and psychedelic experiences, for example.</p>



<ul class="wp-block-list">
<li><a href="https://nail.cs.ut.ee/">Natural &amp; Artificial Intelligence Lab</a></li>



<li>Social: <a href="https://bsky.app/profile/did:plc:pesund73gzmi4skufhb2mtye">@jaanaru.bsky.social</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.sciencedirect.com/science/article/abs/pii/S0166223623002278">The feasibility of artificial consciousness through the lens of neuroscience</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0149763425005251">On biological and artificial consciousness: A case for biological computationalism</a></li>



<li><a href="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(20)30175-3">Cellular mechanisms of conscious processing</a>.</li>



<li><a href="https://www.tandfonline.com/doi/abs/10.1080/09515089.2026.2613030">Realization experiences: a convergent account of insight and mystical experiences</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:21 - Jaan's approach
8:51 - Likelihood of machine consciousness
18:58 - Across-levels understanding
30:23 - Intelligence vs consciousness
36:27 - Connecting low-level implementation to cognition
45:42 - Organization and constraints
52:28 - Thalamocortical loops
1:04:18 - Artificial consciousness
1:14:34 - Theories of consciousness
1:23:16 - Creativity and insight
1:37:26 - Science research in Estonia</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Jaan Aru is a co-principal investigator of the Natural and Artificial Intelligence Lab at the University of Tartu in Estonia, where he is an associate professor. Jaan's name has kept popping up on papers I've read over the last few years, sometimes alongside other guests I've had on the podcast, like Matthew Larkum and Mac Shine. With those people and others, he has co-authored papers exploring how some of the pesky biological details of brains might be important for our subjective conscious experience, details like dendritic integration, and loops between the cortex and the thalamus. Turns out a recurring theme in his work is to connect lower-level nitty gritty biological details with higher level cognitive functioning. And he has some thoughts about what that might mean for the prospects of consciousness in  artificial systems. And we also touch on his more recent interest in understanding the brain basis of insight and creativity, connecting some of the more mundane kinds of insights during problem solving, for example, with some of the more profound kinds of insights during mystical and psychedelic experiences, for example.




Natural & Artificial Intelligence Lab



Social: @jaanaru.bsky.social



Related papers

The feasibility of artificial consciousness through the lens of neuroscience



On biological and artificial consciousness: A case for biological computationalism



Cellular mechanisms of conscious processing.



Realization experiences: a convergent account of insight and mystical experiences.






0:00 - Intro
4:21 - Jaan's approach
8:51 - Likelihood of machine consciousness
18:58 - Across-levels understanding
30:23 - Intelligence vs consciousness
36:27 - Connecting low-level implementation to cognition
45:42 - Organization and constraints
52:28 - Thalamocortical loops
1:04:18 - Artificial consciousness
1:14:34 - Theories of consciousness
1:23:16 - Creativity and insight
1:37:26 - Science research in Estonia]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 231 Jaan Aru: Conscious AI? Not Even Close!]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Jaan Aru is a co-principal investigator of the Natural and Artificial Intelligence Lab at the University of Tartu in Estonia, where he is an associate professor. Jaan's name has kept popping up on papers I've read over the last few years, sometimes alongside other guests I've had on the podcast, like <a href="https://braininspired.co/podcast/138/">Matthew Larkum</a> and <a href="https://braininspired.co/podcast/121/">Mac Shine</a>. With those people and others, he has co-authored papers exploring how some of the pesky biological details of brains might be important for our subjective conscious experience, details like dendritic integration, and loops between the cortex and the thalamus. Turns out a recurring theme in his work is to connect lower-level nitty gritty biological details with higher level cognitive functioning. And he has some thoughts about what that might mean for the prospects of consciousness in  artificial systems. And we also touch on his more recent interest in understanding the brain basis of insight and creativity, connecting some of the more mundane kinds of insights during problem solving, for example, with some of the more profound kinds of insights during mystical and psychedelic experiences, for example.</p>



<ul class="wp-block-list">
<li><a href="https://nail.cs.ut.ee/">Natural &amp; Artificial Intelligence Lab</a></li>



<li>Social: <a href="https://bsky.app/profile/did:plc:pesund73gzmi4skufhb2mtye">@jaanaru.bsky.social</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.sciencedirect.com/science/article/abs/pii/S0166223623002278">The feasibility of artificial consciousness through the lens of neuroscience</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0149763425005251">On biological and artificial consciousness: A case for biological computationalism</a></li>



<li><a href="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(20)30175-3">Cellular mechanisms of conscious processing</a>.</li>



<li><a href="https://www.tandfonline.com/doi/abs/10.1080/09515089.2026.2613030">Realization experiences: a convergent account of insight and mystical experiences</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:21 - Jaan's approach
8:51 - Likelihood of machine consciousness
18:58 - Across-levels understanding
30:23 - Intelligence vs consciousness
36:27 - Connecting low-level implementation to cognition
45:42 - Organization and constraints
52:28 - Thalamocortical loops
1:04:18 - Artificial consciousness
1:14:34 - Theories of consciousness
1:23:16 - Creativity and insight
1:37:26 - Science research in Estonia</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2358658/c1e-9077s2wr8jt46dx3-jpqpq893trzw-0nsugf.mp3" length="104932933"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Jaan Aru is a co-principal investigator of the Natural and Artificial Intelligence Lab at the University of Tartu in Estonia, where he is an associate professor. Jaan's name has kept popping up on papers I've read over the last few years, sometimes alongside other guests I've had on the podcast, like Matthew Larkum and Mac Shine. With those people and others, he has co-authored papers exploring how some of the pesky biological details of brains might be important for our subjective conscious experience, details like dendritic integration, and loops between the cortex and the thalamus. Turns out a recurring theme in his work is to connect lower-level nitty gritty biological details with higher level cognitive functioning. And he has some thoughts about what that might mean for the prospects of consciousness in  artificial systems. And we also touch on his more recent interest in understanding the brain basis of insight and creativity, connecting some of the more mundane kinds of insights during problem solving, for example, with some of the more profound kinds of insights during mystical and psychedelic experiences, for example.




Natural & Artificial Intelligence Lab



Social: @jaanaru.bsky.social



Related papers

The feasibility of artificial consciousness through the lens of neuroscience



On biological and artificial consciousness: A case for biological computationalism



Cellular mechanisms of conscious processing.



Realization experiences: a convergent account of insight and mystical experiences.






0:00 - Intro
4:21 - Jaan's approach
8:51 - Likelihood of machine consciousness
18:58 - Across-levels understanding
30:23 - Intelligence vs consciousness
36:27 - Connecting low-level implementation to cognition
45:42 - Organization and constraints
52:28 - Thalamocortical loops
1:04:18 - Artificial consciousness
1:14:34 - Theories of consciousness
1:23:16 - Creativity and insight
1:37:26 - Science research in Estonia]]>
                </itunes:summary>
                                                                            <itunes:duration>01:48:03</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 230 Michael Shadlen: How Thoughts Become Conscious]]>
                </title>
                <pubDate>Wed, 28 Jan 2026 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2340665</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-230-michael-shadlen-how-thoughts-become-conscious</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Michael Shadlen is a professor of neuroscience in the Department of Neuroscience at Columbia University, where he's the principal investigator of the Shadlen Lab. If you study the neural basis of decision making, you already know Shadlen's extensive research, because you are constantly referring to it if you're not already in his lab doing the work. The name Shadlen adorns many, many papers relating the behavior and neural activity during decision-making to mathematical models in the drift diffusion family of models. That's not the only work he is known for.</p>



<p>As you may have gleaned from those little intro clips, Michael is with me today to discuss his account of what makes a thought conscious, in the hopes to inspire neuroscience research to eventually tackle the hard problem of consciousness - why and how we have subjective experience.</p>



<p>But Mike's account isn't an account of just consciousness. It's an account of nonconscious thought and conscious thought, and how thoughts go from non-conscious to conscious.</p>



<p>His account is inspired by multiple sources and lines of reasoning.</p>



<p>Partly, Shadlen refers to philosophical accounts of cognition by people like Merleau-Ponty and James Gibson, appreciating the embodied and ecological aspects of cognition.</p>



<p>And much of his account derives from his own decades of research studying the neural basis of decision-making mostly using perceptual choice tasks where animals make eye movements to report their decisions.</p>



<p>So we discuss some of that, including what we continue to learn about neurobiological, neurophysiological, and anatomical details of brains, and the possibility of AI consciousness, given Shadlen's account.</p>



<ul class="wp-block-list">
<li><a href="https://shadlenlab.zi.columbia.edu/">Shadlen Lab</a>.</li>



<li>Twitter: <a href="https://x.com/shadlen">@shadlen</a>.</li>



<li><a href="https://braininspired.co/wp-content/uploads/2026/01/ShadlenM_Kandel-Ch56_1392-1416.pdf" target="_blank" rel="noreferrer noopener">Decision Making and Consciousness</a> (Chapter in upcoming Principles of Neuroscience textbook).</li>



<li>Talk: <a href="https://www.youtube.com/watch?v=vvvqyUf0BQc">Decision Making as a Model of thought</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2026/01/BI-230-transcript-michael-shadlen.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
7:05 - Overview of Mike's account
9:10 - Thought as interrogation
21:03 - Neurons and thoughts
27:05 - Why so many neurons?
36:21 - Evolution of Mike's thinking
39:48 - Merleau-Ponty, cognition, and meaning
44:54 - Naturalistic tasks
51:11 - Consciousness
58:01 - Martin Buber and relational consciousness
1:00:18 - Social and conscious phenomena correlated
1:04:17 - Function vs. nature of consciousness
1:06:05 - Did language evolve because of consciousness?
1:11:11 - Weak phenomenology and long-range feedback
1:22:02 - How does interrogation work in the bra...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Michael Shadlen is a professor of neuroscience in the Department of Neuroscience at Columbia University, where he's the principal investigator of the Shadlen Lab. If you study the neural basis of decision making, you already know Shadlen's extensive research, because you are constantly referring to it if you're not already in his lab doing the work. The name Shadlen adorns many, many papers relating the behavior and neural activity during decision-making to mathematical models in the drift diffusion family of models. That's not the only work he is known for.



As you may have gleaned from those little intro clips, Michael is with me today to discuss his account of what makes a thought conscious, in the hopes to inspire neuroscience research to eventually tackle the hard problem of consciousness - why and how we have subjective experience.



But Mike's account isn't an account of just consciousness. It's an account of nonconscious thought and conscious thought, and how thoughts go from non-conscious to conscious.



His account is inspired by multiple sources and lines of reasoning.



Partly, Shadlen refers to philosophical accounts of cognition by people like Merleau-Ponty and James Gibson, appreciating the embodied and ecological aspects of cognition.



And much of his account derives from his own decades of research studying the neural basis of decision-making mostly using perceptual choice tasks where animals make eye movements to report their decisions.



So we discuss some of that, including what we continue to learn about neurobiological, neurophysiological, and anatomical details of brains, and the possibility of AI consciousness, given Shadlen's account.




Shadlen Lab.



Twitter: @shadlen.



Decision Making and Consciousness (Chapter in upcoming Principles of Neuroscience textbook).



Talk: Decision Making as a Model of thought




Read the transcript.



0:00 - Intro
7:05 - Overview of Mike's account
9:10 - Thought as interrogation
21:03 - Neurons and thoughts
27:05 - Why so many neurons?
36:21 - Evolution of Mike's thinking
39:48 - Merleau-Ponty, cognition, and meaning
44:54 - Naturalistic tasks
51:11 - Consciousness
58:01 - Martin Buber and relational consciousness
1:00:18 - Social and conscious phenomena correlated
1:04:17 - Function vs. nature of consciousness
1:06:05 - Did language evolve because of consciousness?
1:11:11 - Weak phenomenology and long-range feedback
1:22:02 - How does interrogation work in the bra...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 230 Michael Shadlen: How Thoughts Become Conscious]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Michael Shadlen is a professor of neuroscience in the Department of Neuroscience at Columbia University, where he's the principal investigator of the Shadlen Lab. If you study the neural basis of decision making, you already know Shadlen's extensive research, because you are constantly referring to it if you're not already in his lab doing the work. The name Shadlen adorns many, many papers relating the behavior and neural activity during decision-making to mathematical models in the drift diffusion family of models. That's not the only work he is known for.</p>



<p>As you may have gleaned from those little intro clips, Michael is with me today to discuss his account of what makes a thought conscious, in the hopes to inspire neuroscience research to eventually tackle the hard problem of consciousness - why and how we have subjective experience.</p>



<p>But Mike's account isn't an account of just consciousness. It's an account of nonconscious thought and conscious thought, and how thoughts go from non-conscious to conscious.</p>



<p>His account is inspired by multiple sources and lines of reasoning.</p>



<p>Partly, Shadlen refers to philosophical accounts of cognition by people like Merleau-Ponty and James Gibson, appreciating the embodied and ecological aspects of cognition.</p>



<p>And much of his account derives from his own decades of research studying the neural basis of decision-making mostly using perceptual choice tasks where animals make eye movements to report their decisions.</p>



<p>So we discuss some of that, including what we continue to learn about neurobiological, neurophysiological, and anatomical details of brains, and the possibility of AI consciousness, given Shadlen's account.</p>



<ul class="wp-block-list">
<li><a href="https://shadlenlab.zi.columbia.edu/">Shadlen Lab</a>.</li>



<li>Twitter: <a href="https://x.com/shadlen">@shadlen</a>.</li>



<li><a href="https://braininspired.co/wp-content/uploads/2026/01/ShadlenM_Kandel-Ch56_1392-1416.pdf" target="_blank" rel="noreferrer noopener">Decision Making and Consciousness</a> (Chapter in upcoming Principles of Neuroscience textbook).</li>



<li>Talk: <a href="https://www.youtube.com/watch?v=vvvqyUf0BQc">Decision Making as a Model of thought</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2026/01/BI-230-transcript-michael-shadlen.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
7:05 - Overview of Mike's account
9:10 - Thought as interrogation
21:03 - Neurons and thoughts
27:05 - Why so many neurons?
36:21 - Evolution of Mike's thinking
39:48 - Merleau-Ponty, cognition, and meaning
44:54 - Naturalistic tasks
51:11 - Consciousness
58:01 - Martin Buber and relational consciousness
1:00:18 - Social and conscious phenomena correlated
1:04:17 - Function vs. nature of consciousness
1:06:05 - Did language evolve because of consciousness?
1:11:11 - Weak phenomenology and long-range feedback
1:22:02 - How does interrogation work in the brain?
1:26:18 - AI consciousness
1:35:49 - The hard problem of consciousness
1:39:34 - Meditation and flow</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2340665/c1e-z9xxc323poc20759-jpqnzm2kck6z-6uhcul.mp3" length="105636564"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Michael Shadlen is a professor of neuroscience in the Department of Neuroscience at Columbia University, where he's the principal investigator of the Shadlen Lab. If you study the neural basis of decision making, you already know Shadlen's extensive research, because you are constantly referring to it if you're not already in his lab doing the work. The name Shadlen adorns many, many papers relating the behavior and neural activity during decision-making to mathematical models in the drift diffusion family of models. That's not the only work he is known for.



As you may have gleaned from those little intro clips, Michael is with me today to discuss his account of what makes a thought conscious, in the hopes to inspire neuroscience research to eventually tackle the hard problem of consciousness - why and how we have subjective experience.



But Mike's account isn't an account of just consciousness. It's an account of nonconscious thought and conscious thought, and how thoughts go from non-conscious to conscious.



His account is inspired by multiple sources and lines of reasoning.



Partly, Shadlen refers to philosophical accounts of cognition by people like Merleau-Ponty and James Gibson, appreciating the embodied and ecological aspects of cognition.



And much of his account derives from his own decades of research studying the neural basis of decision-making mostly using perceptual choice tasks where animals make eye movements to report their decisions.



So we discuss some of that, including what we continue to learn about neurobiological, neurophysiological, and anatomical details of brains, and the possibility of AI consciousness, given Shadlen's account.




Shadlen Lab.



Twitter: @shadlen.



Decision Making and Consciousness (Chapter in upcoming Principles of Neuroscience textbook).



Talk: Decision Making as a Model of thought




Read the transcript.



0:00 - Intro
7:05 - Overview of Mike's account
9:10 - Thought as interrogation
21:03 - Neurons and thoughts
27:05 - Why so many neurons?
36:21 - Evolution of Mike's thinking
39:48 - Merleau-Ponty, cognition, and meaning
44:54 - Naturalistic tasks
51:11 - Consciousness
58:01 - Martin Buber and relational consciousness
1:00:18 - Social and conscious phenomena correlated
1:04:17 - Function vs. nature of consciousness
1:06:05 - Did language evolve because of consciousness?
1:11:11 - Weak phenomenology and long-range feedback
1:22:02 - How does interrogation work in the bra...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:48:30</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 229 Tomaso Poggio: Principles of Intelligence and Learning]]>
                </title>
                <pubDate>Wed, 14 Jan 2026 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2322542</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-229-tomaso-poggio-principles-of-intelligence-and-learning</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Tomaso Poggio is the Eugene McDermott professor in the Department of Brain and Cognitive Sciences, an investigator at the McGovern Institute for Brain Research, a member of the MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) and director of both the Center for Biological and Computational Learning at MIT and the Center for Brains, Minds, and Machines.</p>



<p>Tomaso believes we are in-between building and understanding useful AI. That is, we are in between engineering and theory. He likens this stage to the period after Volta invented the battery and Maxwell developed the equations of electromagnetism. Tomaso has worked for decades on the theory and principles behind intelligence and learning in brains and machines. I first learned of him via his work with David Marr, in which they developed "Marr's levels" of analysis that frame explanation in terms of computation/function, algorithms, and implementation. Since then Tomaso has added "learning" as a crucial fourth level. I will refer you to his autobiography to learn more about the many influential people and projects he has worked with and on, the theorems he and others have proved to discover principles of intelligence, and his broader thoughts and reflections.</p>



<p>Right now, he is focused on the principles of compositional sparsity and genericity to explain how deep learning networks can (computationally) efficiently learn useful representations to solve tasks.</p>



<ul class="wp-block-list">
<li><a href="https://poggio-lab.mit.edu/lab/">Lab website</a>.</li>



<li><a href="https://dspace.mit.edu/bitstream/handle/1721.1/70970/mit-csail-tr-2012-014.pdf">Tomaso's Autobiography</a> </li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2507.02550">Position: A Theory of Deep Learning Must Include Compositional Sparsity</a></li>



<li><a href="https://dspace.mit.edu/bitstream/handle/1721.1/70970/mit-csail-tr-2012-014.pdf">The Levels of Understanding framework, revised</a></li>
</ul>
</li>



<li>Blog post:
<ul class="wp-block-list">
<li><a href="https://poggio-lab.mit.edu/blog/">Poggio lab blog</a>.</li>



<li><a href="https://poggio-lab.mit.edu/the-missing-foundations-of-intelligence/">The Missing Foundations of Intelligence</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2026/01/BI-229-transcript-tomaso-Poggio.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
9:04 - Learning as the fourth level of Marr's levels
12:34 - Engineering then theory (Volta to Maxwell)
19:23 - Does AI need theory?
26:29 - Learning as the door to intelligence
38:30 - Learning in the brain vs backpropagation
40:45 - Compositional sparsity
49:57 - Math vs computer science
56:50 - Generalizability
1:04:41 - Sparse compositionality in brains?
1:07:33 - Theory vs experiment
1:09:46 - Who needs deep learning theory?
1:19:51 - Does theory really help? Patreon
1:28:54 - Outlook</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Tomaso Poggio is the Eugene McDermott professor in the Department of Brain and Cognitive Sciences, an investigator at the McGovern Institute for Brain Research, a member of the MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) and director of both the Center for Biological and Computational Learning at MIT and the Center for Brains, Minds, and Machines.



Tomaso believes we are in-between building and understanding useful AI. That is, we are in between engineering and theory. He likens this stage to the period after Volta invented the battery and Maxwell developed the equations of electromagnetism. Tomaso has worked for decades on the theory and principles behind intelligence and learning in brains and machines. I first learned of him via his work with David Marr, in which they developed "Marr's levels" of analysis that frame explanation in terms of computation/function, algorithms, and implementation. Since then Tomaso has added "learning" as a crucial fourth level. I will refer you to his autobiography to learn more about the many influential people and projects he has worked with and on, the theorems he and others have proved to discover principles of intelligence, and his broader thoughts and reflections.



Right now, he is focused on the principles of compositional sparsity and genericity to explain how deep learning networks can (computationally) efficiently learn useful representations to solve tasks.




Lab website.



Tomaso's Autobiography 



Related papers

Position: A Theory of Deep Learning Must Include Compositional Sparsity



The Levels of Understanding framework, revised





Blog post:

Poggio lab blog.



The Missing Foundations of Intelligence






Read the transcript.



0:00 - Intro
9:04 - Learning as the fourth level of Marr's levels
12:34 - Engineering then theory (Volta to Maxwell)
19:23 - Does AI need theory?
26:29 - Learning as the door to intelligence
38:30 - Learning in the brain vs backpropagation
40:45 - Compositional sparsity
49:57 - Math vs computer science
56:50 - Generalizability
1:04:41 - Sparse compositionality in brains?
1:07:33 - Theory vs experiment
1:09:46 - Who needs deep learning theory?
1:19:51 - Does theory really help? Patreon
1:28:54 - Outlook]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 229 Tomaso Poggio: Principles of Intelligence and Learning]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Tomaso Poggio is the Eugene McDermott professor in the Department of Brain and Cognitive Sciences, an investigator at the McGovern Institute for Brain Research, a member of the MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) and director of both the Center for Biological and Computational Learning at MIT and the Center for Brains, Minds, and Machines.</p>



<p>Tomaso believes we are in-between building and understanding useful AI. That is, we are in between engineering and theory. He likens this stage to the period after Volta invented the battery and Maxwell developed the equations of electromagnetism. Tomaso has worked for decades on the theory and principles behind intelligence and learning in brains and machines. I first learned of him via his work with David Marr, in which they developed "Marr's levels" of analysis that frame explanation in terms of computation/function, algorithms, and implementation. Since then Tomaso has added "learning" as a crucial fourth level. I will refer you to his autobiography to learn more about the many influential people and projects he has worked with and on, the theorems he and others have proved to discover principles of intelligence, and his broader thoughts and reflections.</p>



<p>Right now, he is focused on the principles of compositional sparsity and genericity to explain how deep learning networks can (computationally) efficiently learn useful representations to solve tasks.</p>



<ul class="wp-block-list">
<li><a href="https://poggio-lab.mit.edu/lab/">Lab website</a>.</li>



<li><a href="https://dspace.mit.edu/bitstream/handle/1721.1/70970/mit-csail-tr-2012-014.pdf">Tomaso's Autobiography</a> </li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2507.02550">Position: A Theory of Deep Learning Must Include Compositional Sparsity</a></li>



<li><a href="https://dspace.mit.edu/bitstream/handle/1721.1/70970/mit-csail-tr-2012-014.pdf">The Levels of Understanding framework, revised</a></li>
</ul>
</li>



<li>Blog post:
<ul class="wp-block-list">
<li><a href="https://poggio-lab.mit.edu/blog/">Poggio lab blog</a>.</li>



<li><a href="https://poggio-lab.mit.edu/the-missing-foundations-of-intelligence/">The Missing Foundations of Intelligence</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2026/01/BI-229-transcript-tomaso-Poggio.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
9:04 - Learning as the fourth level of Marr's levels
12:34 - Engineering then theory (Volta to Maxwell)
19:23 - Does AI need theory?
26:29 - Learning as the door to intelligence
38:30 - Learning in the brain vs backpropagation
40:45 - Compositional sparsity
49:57 - Math vs computer science
56:50 - Generalizability
1:04:41 - Sparse compositionality in brains?
1:07:33 - Theory vs experiment
1:09:46 - Who needs deep learning theory?
1:19:51 - Does theory really help? Patreon
1:28:54 - Outlook</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2322542/c1e-gkoohr44vjsz85v8-okp37xz8up5m-obpxob.mp3" length="98338122"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Tomaso Poggio is the Eugene McDermott professor in the Department of Brain and Cognitive Sciences, an investigator at the McGovern Institute for Brain Research, a member of the MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) and director of both the Center for Biological and Computational Learning at MIT and the Center for Brains, Minds, and Machines.



Tomaso believes we are in-between building and understanding useful AI. That is, we are in between engineering and theory. He likens this stage to the period after Volta invented the battery and Maxwell developed the equations of electromagnetism. Tomaso has worked for decades on the theory and principles behind intelligence and learning in brains and machines. I first learned of him via his work with David Marr, in which they developed "Marr's levels" of analysis that frame explanation in terms of computation/function, algorithms, and implementation. Since then Tomaso has added "learning" as a crucial fourth level. I will refer you to his autobiography to learn more about the many influential people and projects he has worked with and on, the theorems he and others have proved to discover principles of intelligence, and his broader thoughts and reflections.



Right now, he is focused on the principles of compositional sparsity and genericity to explain how deep learning networks can (computationally) efficiently learn useful representations to solve tasks.




Lab website.



Tomaso's Autobiography 



Related papers

Position: A Theory of Deep Learning Must Include Compositional Sparsity



The Levels of Understanding framework, revised





Blog post:

Poggio lab blog.



The Missing Foundations of Intelligence






Read the transcript.



0:00 - Intro
9:04 - Learning as the fourth level of Marr's levels
12:34 - Engineering then theory (Volta to Maxwell)
19:23 - Does AI need theory?
26:29 - Learning as the door to intelligence
38:30 - Learning in the brain vs backpropagation
40:45 - Compositional sparsity
49:57 - Math vs computer science
56:50 - Generalizability
1:04:41 - Sparse compositionality in brains?
1:07:33 - Theory vs experiment
1:09:46 - Who needs deep learning theory?
1:19:51 - Does theory really help? Patreon
1:28:54 - Outlook]]>
                </itunes:summary>
                                                                            <itunes:duration>01:41:00</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 228 Alex Maier: Laws of Consciousness]]>
                </title>
                <pubDate>Wed, 31 Dec 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2309376</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-228-alex-maier-laws-of-consciousness</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Alex is an associate professor of psychology at Vanderbilt University where he heads the Maier Lab. His work in neuroscience spans vision, visual perception, and cognition, studying the neurophysiology of cortical columns, and other related topics. Today, he is here to discuss where his focus has shifted over the past few years, the neuroscience of consciousness. I should say shifted back, since that was his original love, which you'll hear about.</p>



<p>I've known Alex since my own time at Vanderbilt, where I was a postdoc and he was a new faculty member, and I remember being impressed with him then. I was at a talk he gave - job talk or early talk - where it was immediately obvious how passionate and articulate he is about what he does, and I remember he even showed off some of his telescope photography - good pictures of the moon, I remember. Anyway, we always had fun interactions, even if sometimes it was a quick hello as he ran up stairs and down hallways to get wherever he was going, always in a hurry.</p>



<p>Today we discuss why Alex sees integrated information theory as the most viable current prospect for explaining consciousness. That is mainly because IIT has developed a formalized mathematical account that hopes to do for consciousness what other math has done for physics, that is, give us what we know as laws of nature. So basically our discussion revolves around everything related to that, like philosophy of science, distinguishing mathematics from "the mathematical", some of the tools he is finding valuable, like category theory, and some of his work measuring the level of consciousness IIT says a whole soccer team has, not just the individuals that comprise the team.</p>



<ul class="wp-block-list">
<li><a href="https://maierlab.wiki/">Maier Lab</a></li>



<li><a href="https://www.youtube.com/@astonishinghypothesis/featured">Astonishing Hypothesis</a> (Alex's youtube channel)</li>



<li>Twitter: </li>



<li><a href="https://maierav.github.io/sensation/">Sensation and Perception</a> textbook (in-the-making)</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://osf.io/preprints/psyarxiv/cdjpf_v1">Linking the Structure of Neuronal Mechanisms to the Structure of Qualia</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0960077925016406">Information integration and the latent consciousness of human groups</a></li>



<li><a href="https://arxiv.org/abs/2504.09614">Neural mechanisms of predictive processing: a collaborative community experiment through the OpenScope program</a></li>
</ul>
</li>
</ul>



<ul class="wp-block-list">
<li>Various things Alex mentioned:
<ul class="wp-block-list">
<li><a href="https://www.youtube.com/watch?v=JVDRB0GtHqE">“An Antiphilosophy of Mathematics,” Peter J. Freyd</a> youtube video about "the mathematical".</li>



<li><a href="https://www.youtube.com/playlist?list=PLUl4u3cNGP63bAfjGas3TuA4ZCPUtN6Xf">David Kaiser's playlist on modern physics</a>.</li>
</ul>
</li>



<li>Here's a link to t...</li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Alex is an associate professor of psychology at Vanderbilt University where he heads the Maier Lab. His work in neuroscience spans vision, visual perception, and cognition, studying the neurophysiology of cortical columns, and other related topics. Today, he is here to discuss where his focus has shifted over the past few years, the neuroscience of consciousness. I should say shifted back, since that was his original love, which you'll hear about.



I've known Alex since my own time at Vanderbilt, where I was a postdoc and he was a new faculty member, and I remember being impressed with him then. I was at a talk he gave - job talk or early talk - where it was immediately obvious how passionate and articulate he is about what he does, and I remember he even showed off some of his telescope photography - good pictures of the moon, I remember. Anyway, we always had fun interactions, even if sometimes it was a quick hello as he ran up stairs and down hallways to get wherever he was going, always in a hurry.



Today we discuss why Alex sees integrated information theory as the most viable current prospect for explaining consciousness. That is mainly because IIT has developed a formalized mathematical account that hopes to do for consciousness what other math has done for physics, that is, give us what we know as laws of nature. So basically our discussion revolves around everything related to that, like philosophy of science, distinguishing mathematics from "the mathematical", some of the tools he is finding valuable, like category theory, and some of his work measuring the level of consciousness IIT says a whole soccer team has, not just the individuals that comprise the team.




Maier Lab



Astonishing Hypothesis (Alex's youtube channel)



Twitter: 



Sensation and Perception textbook (in-the-making)



Related papers

Linking the Structure of Neuronal Mechanisms to the Structure of Qualia



Information integration and the latent consciousness of human groups



Neural mechanisms of predictive processing: a collaborative community experiment through the OpenScope program







Various things Alex mentioned:

“An Antiphilosophy of Mathematics,” Peter J. Freyd youtube video about "the mathematical".



David Kaiser's playlist on modern physics.





Here's a link to t...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 228 Alex Maier: Laws of Consciousness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Alex is an associate professor of psychology at Vanderbilt University where he heads the Maier Lab. His work in neuroscience spans vision, visual perception, and cognition, studying the neurophysiology of cortical columns, and other related topics. Today, he is here to discuss where his focus has shifted over the past few years, the neuroscience of consciousness. I should say shifted back, since that was his original love, which you'll hear about.</p>



<p>I've known Alex since my own time at Vanderbilt, where I was a postdoc and he was a new faculty member, and I remember being impressed with him then. I was at a talk he gave - job talk or early talk - where it was immediately obvious how passionate and articulate he is about what he does, and I remember he even showed off some of his telescope photography - good pictures of the moon, I remember. Anyway, we always had fun interactions, even if sometimes it was a quick hello as he ran up stairs and down hallways to get wherever he was going, always in a hurry.</p>



<p>Today we discuss why Alex sees integrated information theory as the most viable current prospect for explaining consciousness. That is mainly because IIT has developed a formalized mathematical account that hopes to do for consciousness what other math has done for physics, that is, give us what we know as laws of nature. So basically our discussion revolves around everything related to that, like philosophy of science, distinguishing mathematics from "the mathematical", some of the tools he is finding valuable, like category theory, and some of his work measuring the level of consciousness IIT says a whole soccer team has, not just the individuals that comprise the team.</p>



<ul class="wp-block-list">
<li><a href="https://maierlab.wiki/">Maier Lab</a></li>



<li><a href="https://www.youtube.com/@astonishinghypothesis/featured">Astonishing Hypothesis</a> (Alex's youtube channel)</li>



<li>Twitter: </li>



<li><a href="https://maierav.github.io/sensation/">Sensation and Perception</a> textbook (in-the-making)</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://osf.io/preprints/psyarxiv/cdjpf_v1">Linking the Structure of Neuronal Mechanisms to the Structure of Qualia</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0960077925016406">Information integration and the latent consciousness of human groups</a></li>



<li><a href="https://arxiv.org/abs/2504.09614">Neural mechanisms of predictive processing: a collaborative community experiment through the OpenScope program</a></li>
</ul>
</li>
</ul>



<ul class="wp-block-list">
<li>Various things Alex mentioned:
<ul class="wp-block-list">
<li><a href="https://www.youtube.com/watch?v=JVDRB0GtHqE">“An Antiphilosophy of Mathematics,” Peter J. Freyd</a> youtube video about "the mathematical".</li>



<li><a href="https://www.youtube.com/playlist?list=PLUl4u3cNGP63bAfjGas3TuA4ZCPUtN6Xf">David Kaiser's playlist on modern physics</a>.</li>
</ul>
</li>



<li>Here's a link to the <a href="https://www.iit.wiki/">Integrated Information Theory Wiki</a>.</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2026/01/BI-228-transcript-alex-maier.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
4:27 - Discovering consciousness science
11:23 - Laws of perception
15:48 - Integrated information theory and mathematical formalism
23:54 - Theories of consciousness without math
28:18 - Computation metaphor
34:44 - Formalized mathematics is the way
36:56 - Category theory
41:42 - Structuralism
51:09 - The mathematical
54:33 - Metaphysics of the mathematical
59:52 - Yoneda Lemma
1:12:05 - What's real
1:26:22 - Measuring consciousness of a soccer team
1:35:03 - Assumptions and approximations of IIT
1:43:13 - Open science</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2309376/c1e-d5wwam0r9wi5637z-okjr0gd4aq0g-jycm0c.mp3" length="114461139"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Alex is an associate professor of psychology at Vanderbilt University where he heads the Maier Lab. His work in neuroscience spans vision, visual perception, and cognition, studying the neurophysiology of cortical columns, and other related topics. Today, he is here to discuss where his focus has shifted over the past few years, the neuroscience of consciousness. I should say shifted back, since that was his original love, which you'll hear about.



I've known Alex since my own time at Vanderbilt, where I was a postdoc and he was a new faculty member, and I remember being impressed with him then. I was at a talk he gave - job talk or early talk - where it was immediately obvious how passionate and articulate he is about what he does, and I remember he even showed off some of his telescope photography - good pictures of the moon, I remember. Anyway, we always had fun interactions, even if sometimes it was a quick hello as he ran up stairs and down hallways to get wherever he was going, always in a hurry.



Today we discuss why Alex sees integrated information theory as the most viable current prospect for explaining consciousness. That is mainly because IIT has developed a formalized mathematical account that hopes to do for consciousness what other math has done for physics, that is, give us what we know as laws of nature. So basically our discussion revolves around everything related to that, like philosophy of science, distinguishing mathematics from "the mathematical", some of the tools he is finding valuable, like category theory, and some of his work measuring the level of consciousness IIT says a whole soccer team has, not just the individuals that comprise the team.




Maier Lab



Astonishing Hypothesis (Alex's youtube channel)



Twitter: 



Sensation and Perception textbook (in-the-making)



Related papers

Linking the Structure of Neuronal Mechanisms to the Structure of Qualia



Information integration and the latent consciousness of human groups



Neural mechanisms of predictive processing: a collaborative community experiment through the OpenScope program







Various things Alex mentioned:

“An Antiphilosophy of Mathematics,” Peter J. Freyd youtube video about "the mathematical".



David Kaiser's playlist on modern physics.





Here's a link to t...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:57:54</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 227 Decoding Memories: Aspirational Neuroscience 2025]]>
                </title>
                <pubDate>Wed, 17 Dec 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2292029</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-227-decoding-memories-aspirational-neuroscience-2025</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Can you look at all the synaptic connections of a brain, and tell me one nontrivial memory from the organism that has that brain? If so, you shall win the $100,000 prize from the Aspirational Neuroscience group.</p>



<p>I was recently invited for the second time to chair a panel of experts to discuss that question and all the issues around that question - how to decode a non-trivial memory from a static map of synaptic connectivity.</p>



<p>Before I play that recording, let me set the stage a bit more.</p>



<p>Aspirational Neuroscience is a community of neuroscientists run by Kenneth Hayworth, with the goal, from their website, to "balance aspirational thinking with respect to the long-term implications of a successful neuroscience with practical realism about our current state of ignorance and knowledge." One of those aspirations is to decoding things - memories, learned behaviors, and so on - from static connectomes. They hold satellite events at the SfN conference, and invite experts in connectomics from academia and from industry to share their thoughts and progress that might advance that goal.</p>



<p>In this panel discussion, we touch on multiple relevant topics. One question is what is the right experimental design or designs that would answer whether we are decoding memory - what is a benchmark in various model organisms, and for various theoretical frameworks? We discuss some of the obstacles in the way, both technologically and conceptually. Like the fact that proofreading connectome connections - manually verifying and editing them - is a giant bottleneck, or like the very definition of memory, what counts as a memory, let alone a "nontrivial" memory, and so on. And they take lots of questions from the audience as well.</p>



<p>I apologize the audio is not crystal clear in this recording. I did my best to clean it up, and I take full blame for not setting up my audio recorder to capture the best sound. So, if you are a listener, I'd encourage you to check out the video version, which also has subtitles throughout for when the language isn't clear.</p>



<p>Anyway, this is a fun and smart group of people, and I look forward to another one next year I hope.</p>



<p>The last time I did this was episode 180, BI 180, which I link to in the show notes. Before that I had on Ken Hayworth, whom I mentioned runs Aspirational Neuroscience, and Randal Koene, who is on the panel this time. They were on to talk about the future possibility of uploading minds to computers based on connectomes. That was episode 103.</p>



<ul class="wp-block-list">
<li><a href="https://aspirationalneuroscience.org/">Aspirational Neuroscience</a></li>



<li>Panel
<ul class="wp-block-list">
<li><a href="https://scholar.google.com/citations?user=XSjXVbQAAAAJ&amp;hl=en">Michał Januszewski</a><ul><li><a href="https://bsky.app/profile/michalwj.bsky.social">@michalwj.bsky.social</a></li></ul>
<ul class="wp-block-list">
<li>Research scientist (connectomics) with Google Research, automated neural trac...</li></ul></li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Can you look at all the synaptic connections of a brain, and tell me one nontrivial memory from the organism that has that brain? If so, you shall win the $100,000 prize from the Aspirational Neuroscience group.



I was recently invited for the second time to chair a panel of experts to discuss that question and all the issues around that question - how to decode a non-trivial memory from a static map of synaptic connectivity.



Before I play that recording, let me set the stage a bit more.



Aspirational Neuroscience is a community of neuroscientists run by Kenneth Hayworth, with the goal, from their website, to "balance aspirational thinking with respect to the long-term implications of a successful neuroscience with practical realism about our current state of ignorance and knowledge." One of those aspirations is to decoding things - memories, learned behaviors, and so on - from static connectomes. They hold satellite events at the SfN conference, and invite experts in connectomics from academia and from industry to share their thoughts and progress that might advance that goal.



In this panel discussion, we touch on multiple relevant topics. One question is what is the right experimental design or designs that would answer whether we are decoding memory - what is a benchmark in various model organisms, and for various theoretical frameworks? We discuss some of the obstacles in the way, both technologically and conceptually. Like the fact that proofreading connectome connections - manually verifying and editing them - is a giant bottleneck, or like the very definition of memory, what counts as a memory, let alone a "nontrivial" memory, and so on. And they take lots of questions from the audience as well.



I apologize the audio is not crystal clear in this recording. I did my best to clean it up, and I take full blame for not setting up my audio recorder to capture the best sound. So, if you are a listener, I'd encourage you to check out the video version, which also has subtitles throughout for when the language isn't clear.



Anyway, this is a fun and smart group of people, and I look forward to another one next year I hope.



The last time I did this was episode 180, BI 180, which I link to in the show notes. Before that I had on Ken Hayworth, whom I mentioned runs Aspirational Neuroscience, and Randal Koene, who is on the panel this time. They were on to talk about the future possibility of uploading minds to computers based on connectomes. That was episode 103.




Aspirational Neuroscience



Panel

Michał Januszewski@michalwj.bsky.social

Research scientist (connectomics) with Google Research, automated neural trac...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 227 Decoding Memories: Aspirational Neuroscience 2025]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Can you look at all the synaptic connections of a brain, and tell me one nontrivial memory from the organism that has that brain? If so, you shall win the $100,000 prize from the Aspirational Neuroscience group.</p>



<p>I was recently invited for the second time to chair a panel of experts to discuss that question and all the issues around that question - how to decode a non-trivial memory from a static map of synaptic connectivity.</p>



<p>Before I play that recording, let me set the stage a bit more.</p>



<p>Aspirational Neuroscience is a community of neuroscientists run by Kenneth Hayworth, with the goal, from their website, to "balance aspirational thinking with respect to the long-term implications of a successful neuroscience with practical realism about our current state of ignorance and knowledge." One of those aspirations is to decoding things - memories, learned behaviors, and so on - from static connectomes. They hold satellite events at the SfN conference, and invite experts in connectomics from academia and from industry to share their thoughts and progress that might advance that goal.</p>



<p>In this panel discussion, we touch on multiple relevant topics. One question is what is the right experimental design or designs that would answer whether we are decoding memory - what is a benchmark in various model organisms, and for various theoretical frameworks? We discuss some of the obstacles in the way, both technologically and conceptually. Like the fact that proofreading connectome connections - manually verifying and editing them - is a giant bottleneck, or like the very definition of memory, what counts as a memory, let alone a "nontrivial" memory, and so on. And they take lots of questions from the audience as well.</p>



<p>I apologize the audio is not crystal clear in this recording. I did my best to clean it up, and I take full blame for not setting up my audio recorder to capture the best sound. So, if you are a listener, I'd encourage you to check out the video version, which also has subtitles throughout for when the language isn't clear.</p>



<p>Anyway, this is a fun and smart group of people, and I look forward to another one next year I hope.</p>



<p>The last time I did this was episode 180, BI 180, which I link to in the show notes. Before that I had on Ken Hayworth, whom I mentioned runs Aspirational Neuroscience, and Randal Koene, who is on the panel this time. They were on to talk about the future possibility of uploading minds to computers based on connectomes. That was episode 103.</p>



<ul class="wp-block-list">
<li><a href="https://aspirationalneuroscience.org/">Aspirational Neuroscience</a></li>



<li>Panel
<ul class="wp-block-list">
<li><a href="https://scholar.google.com/citations?user=XSjXVbQAAAAJ&amp;hl=en">Michał Januszewski</a><ul><li><a href="https://bsky.app/profile/michalwj.bsky.social">@michalwj.bsky.social</a></li></ul>
<ul class="wp-block-list">
<li>Research scientist (connectomics) with Google Research, automated neural tracing expert</li>
</ul>
</li>



<li><a href="https://alleninstitute.org/person/sven-dorkenwald/">Sven Dorkenwald</a>
<ul class="wp-block-list">
<li><a href="https://bsky.app/profile/sdorkenw.bsky.social">@sdorkenw.bsky.social</a></li>



<li>Research fellow at the Allen Institute, first-author on <a href="https://www.nature.com/articles/s41586-024-07558-y">first full Drosophila connectome</a> paper</li>
</ul>
</li>



<li><a href="https://www.esi-frankfurt.de/people/heleneschmidt/">Helene Schmidt</a><ul><li><a href="https://bsky.app/profile/helenelab.bsky.social">@helenelab.bsky.social</a></li></ul>
<ul class="wp-block-list">
<li>Group leader at Ernst Strungmann Institute, hippocampus connectome &amp; EM expert</li>
</ul>
</li>



<li><a href="https://www.e11.bio/andrew-payne">Andrew Payne</a>
<ul class="wp-block-list">
<li><a href="https://bsky.app/profile/andrewcpayne.bsky.social">@andrewcpayne.bsky.social</a></li>



<li>Founder of <a href="https://www.e11.bio/">E11 Bio</a>, expansion microscopy &amp; viral tracing expert </li>
</ul>
</li>



<li><a href="https://carboncopies.org/About/Team/Bios/RandalKoene/">Randal Koene</a>
<ul class="wp-block-list">
<li>Founder of the <a href="https://carboncopies.org/">Carboncopies Foundation</a>, computational neuroscientist dedicated to the problem of brain emulation.</li>
</ul>
</li>
</ul>
</li>



<li>Related episodes:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/103/">BI 103 Randal Koene and Ken Hayworth: The Road to Mind Uploading</a></li>



<li><a href="https://braininspired.co/podcast/180/">BI 180 Panel Discussion: Long-term Memory Encoding and Connectome Decoding</a></li>
</ul>
</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2292029/c1e-7kpph9jp44a2p1p5-1p78326mfgp9-g1qooj.mp3" length="72408600"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Can you look at all the synaptic connections of a brain, and tell me one nontrivial memory from the organism that has that brain? If so, you shall win the $100,000 prize from the Aspirational Neuroscience group.



I was recently invited for the second time to chair a panel of experts to discuss that question and all the issues around that question - how to decode a non-trivial memory from a static map of synaptic connectivity.



Before I play that recording, let me set the stage a bit more.



Aspirational Neuroscience is a community of neuroscientists run by Kenneth Hayworth, with the goal, from their website, to "balance aspirational thinking with respect to the long-term implications of a successful neuroscience with practical realism about our current state of ignorance and knowledge." One of those aspirations is to decode things - memories, learned behaviors, and so on - from static connectomes. They hold satellite events at the SfN conference, and invite experts in connectomics from academia and from industry to share their thoughts and progress that might advance that goal.



In this panel discussion, we touch on multiple relevant topics. One question is what is the right experimental design or designs that would answer whether we are decoding memory - what is a benchmark in various model organisms, and for various theoretical frameworks? We discuss some of the obstacles in the way, both technologically and conceptually. Like the fact that proofreading connectome connections - manually verifying and editing them - is a giant bottleneck, or like the very definition of memory, what counts as a memory, let alone a "nontrivial" memory, and so on. And they take lots of questions from the audience as well.



I apologize that the audio is not crystal clear in this recording. I did my best to clean it up, and I take full blame for not setting up my audio recorder to capture the best sound. So, if you are a listener, I'd encourage you to check out the video version, which also has subtitles throughout for when the language isn't clear.



Anyway, this is a fun and smart group of people, and I look forward to another one next year I hope.



The last time I did this was episode 180, BI 180, which I link to in the show notes. Before that I had on Ken Hayworth, whom I mentioned runs Aspirational Neuroscience, and Randal Koene, who is on the panel this time. They were on to talk about the future possibility of uploading minds to computers based on connectomes. That was episode 103.




Aspirational Neuroscience



Panel

Michał Januszewski@michalwj.bsky.social

Research scientist (connectomics) with Google Research, automated neural trac...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:15:08</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 226 Tatiana Engel: The High and Low Dimensional Brain]]>
                </title>
                <pubDate>Wed, 03 Dec 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2265216</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-226-tatiana-engel-the-high-and-low-dimensional-brain</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Tatiana Engel runs the Engel lab at Princeton University in the Princeton Neuroscience Institute. She's also part of the <a href="https://www.internationalbrainlab.com/">International Brain Laboratory</a>, a massive across-lab, across-world collaboration, which you'll hear more about. My main impetus for inviting Tatiana was to talk about two projects she's been working on. One of those is connecting the functional dynamics of cognition with the connectivity of the underlying neural networks on which those dynamics unfold. We know the brain is high-dimensional - it has lots of interacting connections, we know the activity of those networks can often be described by lower-dimensional entities called manifolds, and Tatiana and her lab work to connect those two processes with something they call latent circuits. So you'll hear about that, you'll also hear about how the timescales of neurons across the brain are different but the same, why this is cool and surprising, and we discuss many topics around those main topics. </p>



<ul class="wp-block-list">
<li><a href="https://engel-lab.princeton.edu/people/tatiana-engel">Engel Lab</a>.</li>



<li><a href="https://bsky.app/profile/engeltatiana.bsky.social">@engeltatiana.bsky.social</a>.</li>



<li><a href="https://www.internationalbrainlab.com/">International Brain Laboratory</a>.</li>



<li>Related papers:
<ul class="wp-block-list">
<li><a href="https://www.nature.com/articles/s41593-025-01869-7">Latent circuit inference from heterogeneous neural responses during cognitive tasks</a></li>



<li><a href="https://www.nature.com/articles/s41586-025-09199-1">The dynamics and geometry of choice in the premotor cortex</a>.</li>



<li><a href="https://www.nature.com/articles/s41583-023-00693-x">A unifying perspective on neural manifolds and circuits for cognition</a></li>



<li><a href="https://www.biorxiv.org/content/10.1101/2025.08.30.673281v1">Brain-wide organization of intrinsic timescales at single-neuron resolution</a></li>



<li><a href="https://www.nature.com/articles/s42256-025-01127-2">Single-unit activations confer inductive biases for emergent circuit solutions to cognitive tasks</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:03 - No central executive
5:01 - International brain lab
15:57 - Tatiana's background
24:49 - Dynamical systems
17:48 - Manifolds
33:10 - Latent task circuits
47:01 - Mixed selectivity
1:00:21 - Internal and external dynamics
1:03:47 - Modern vs classical modeling
1:14:30 - Intrinsic timescales
1:26:05 - Single trial dynamics
1:29:59 - Future of manifolds</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Tatiana Engel runs the Engel lab at Princeton University in the Princeton Neuroscience Institute. She's also part of the International Brain Laboratory, a massive across-lab, across-world collaboration, which you'll hear more about. My main impetus for inviting Tatiana was to talk about two projects she's been working on. One of those is connecting the functional dynamics of cognition with the connectivity of the underlying neural networks on which those dynamics unfold. We know the brain is high-dimensional - it has lots of interacting connections, we know the activity of those networks can often be described by lower-dimensional entities called manifolds, and Tatiana and her lab work to connect those two processes with something they call latent circuits. So you'll hear about that, you'll also hear about how the timescales of neurons across the brain are different but the same, why this is cool and surprising, and we discuss many topics around those main topics. 




Engel Lab.



@engeltatiana.bsky.social.



International Brain Laboratory.



Related papers:

Latent circuit inference from heterogeneous neural responses during cognitive tasks



The dynamics and geometry of choice in the premotor cortex.



A unifying perspective on neural manifolds and circuits for cognition



Brain-wide organization of intrinsic timescales at single-neuron resolution



Single-unit activations confer inductive biases for emergent circuit solutions to cognitive tasks.






0:00 - Intro
3:03 - No central executive
5:01 - International brain lab
15:57 - Tatiana's background
24:49 - Dynamical systems
17:48 - Manifolds
33:10 - Latent task circuits
47:01 - Mixed selectivity
1:00:21 - Internal and external dynamics
1:03:47 - Modern vs classical modeling
1:14:30 - Intrinsic timescales
1:26:05 - Single trial dynamics
1:29:59 - Future of manifolds]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 226 Tatiana Engel: The High and Low Dimensional Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Tatiana Engel runs the Engel lab at Princeton University in the Princeton Neuroscience Institute. She's also part of the <a href="https://www.internationalbrainlab.com/">International Brain Laboratory</a>, a massive across-lab, across-world collaboration, which you'll hear more about. My main impetus for inviting Tatiana was to talk about two projects she's been working on. One of those is connecting the functional dynamics of cognition with the connectivity of the underlying neural networks on which those dynamics unfold. We know the brain is high-dimensional - it has lots of interacting connections, we know the activity of those networks can often be described by lower-dimensional entities called manifolds, and Tatiana and her lab work to connect those two processes with something they call latent circuits. So you'll hear about that, you'll also hear about how the timescales of neurons across the brain are different but the same, why this is cool and surprising, and we discuss many topics around those main topics. </p>



<ul class="wp-block-list">
<li><a href="https://engel-lab.princeton.edu/people/tatiana-engel">Engel Lab</a>.</li>



<li><a href="https://bsky.app/profile/engeltatiana.bsky.social">@engeltatiana.bsky.social</a>.</li>



<li><a href="https://www.internationalbrainlab.com/">International Brain Laboratory</a>.</li>



<li>Related papers:
<ul class="wp-block-list">
<li><a href="https://www.nature.com/articles/s41593-025-01869-7">Latent circuit inference from heterogeneous neural responses during cognitive tasks</a></li>



<li><a href="https://www.nature.com/articles/s41586-025-09199-1">The dynamics and geometry of choice in the premotor cortex</a>.</li>



<li><a href="https://www.nature.com/articles/s41583-023-00693-x">A unifying perspective on neural manifolds and circuits for cognition</a></li>



<li><a href="https://www.biorxiv.org/content/10.1101/2025.08.30.673281v1">Brain-wide organization of intrinsic timescales at single-neuron resolution</a></li>



<li><a href="https://www.nature.com/articles/s42256-025-01127-2">Single-unit activations confer inductive biases for emergent circuit solutions to cognitive tasks</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:03 - No central executive
5:01 - International brain lab
15:57 - Tatiana's background
24:49 - Dynamical systems
17:48 - Manifolds
33:10 - Latent task circuits
47:01 - Mixed selectivity
1:00:21 - Internal and external dynamics
1:03:47 - Modern vs classical modeling
1:14:30 - Intrinsic timescales
1:26:05 - Single trial dynamics
1:29:59 - Future of manifolds</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2265216/c1e-2k22hm2gnrhvr8z3-7zxjkr9jf9m-5wuzxo.mp3" length="93688785"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Tatiana Engel runs the Engel lab at Princeton University in the Princeton Neuroscience Institute. She's also part of the International Brain Laboratory, a massive across-lab, across-world collaboration, which you'll hear more about. My main impetus for inviting Tatiana was to talk about two projects she's been working on. One of those is connecting the functional dynamics of cognition with the connectivity of the underlying neural networks on which those dynamics unfold. We know the brain is high-dimensional - it has lots of interacting connections, we know the activity of those networks can often be described by lower-dimensional entities called manifolds, and Tatiana and her lab work to connect those two processes with something they call latent circuits. So you'll hear about that, you'll also hear about how the timescales of neurons across the brain are different but the same, why this is cool and surprising, and we discuss many topics around those main topics. 




Engel Lab.



@engeltatiana.bsky.social.



International Brain Laboratory.



Related papers:

Latent circuit inference from heterogeneous neural responses during cognitive tasks



The dynamics and geometry of choice in the premotor cortex.



A unifying perspective on neural manifolds and circuits for cognition



Brain-wide organization of intrinsic timescales at single-neuron resolution



Single-unit activations confer inductive biases for emergent circuit solutions to cognitive tasks.






0:00 - Intro
3:03 - No central executive
5:01 - International brain lab
15:57 - Tatiana's background
24:49 - Dynamical systems
17:48 - Manifolds
33:10 - Latent task circuits
47:01 - Mixed selectivity
1:00:21 - Internal and external dynamics
1:03:47 - Modern vs classical modeling
1:14:30 - Intrinsic timescales
1:26:05 - Single trial dynamics
1:29:59 - Future of manifolds]]>
                </itunes:summary>
                                                                            <itunes:duration>01:36:18</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 225 Henk De Regt: Understanding in Machines and Humans]]>
                </title>
                <pubDate>Wed, 19 Nov 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2230640</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-225-henk-de-regt-understanding-in-machines-and-humans</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p><a href="https://www.ru.nl/en/people/regt-h-de">Henk de Regt</a> is a professor of Philosophy of Science and the director of the <a href="https://www.ru.nl/en/isis">Institute for Science in Society</a> at Radboud University. Henk wrote the book on Understanding. Literally, he wrote what has become a classic in philosophy of science, <a href="https://amzn.to/4hj6k1X">Understanding Scientific Understanding</a>.</p>





<p>Henk's account of understanding goes roughly like this, but you can learn more in his book and other writings. To claim you understand something in science requires that you can produce a theory-based explanation of whatever you claim to understand, and it depends on you having the right scientific skills to be able to work productively with that theory - for example, making qualitative predictions about it without performing calculations. So understanding is contextual and depends on the skills of the understander.</p>



<p>There's more nuance to it, so like I said you should read the book, but this account of understanding distinguishes it from explanation itself, and distinguishes it from other accounts of understanding, which take understanding to be either a personal subjective sense - that feeling of something clicking in your mind - or simply the addition of more facts about something.</p>



<p>In this conversation, we revisit Henk's work on understanding, and how it touches on many other topics, like realism, the use of metaphors, how public understanding differs from expert understanding, idealization and abstraction in science, and so on.</p>



<p>And, because Henk's kind of understanding doesn't depend on subjective awareness or things being true, he and his cohorts have begun working on whether there could be a benchmark for degrees of understanding, to possibly assess whether AI demonstrates understanding, and to use as a common benchmark for humans and machines.</p>



<ul class="wp-block-list">
<li><a href="https://scholar.google.com/citations?view_op=list_works&amp;hl=en&amp;hl=en&amp;user=sBxqGrsAAAAJ&amp;sortby=pubdate">Google Scholar page</a></li>



<li>Social: <a href="https://bsky.app/profile/did:plc:jjoeezq5qw5ofmqtd5um7svi">@henkderegt.bsky.social</a>;  </li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/4hj6k1X">Understanding Scientific Understanding</a>.</li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://link.springer.com/article/10.1007/s11023-024-09657-1">Towards a benchmark for scientific understanding in humans and machines</a></li>



<li><a href="https://www.jbe-platform.com/content/journals/10.1075/msw.22016.sme">Metaphors as tools for understanding in science communication among experts and to the public</a></li>



<li><a href="https://link.springer.com/article/10.1007/s40656-024-00644-4">Two scientific perspectives on nerve signal propagation: how incompatible approaches jointly promote progress in explanatory understanding</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
10:13...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Henk de Regt is a professor of Philosophy of Science and the director of the Institute for Science in Society at Radboud University. Henk wrote the book on Understanding. Literally, he wrote what has become a classic in philosophy of science, Understanding Scientific Understanding.





Henk's account of understanding goes roughly like this, but you can learn more in his book and other writings. To claim you understand something in science requires that you can produce a theory-based explanation of whatever you claim to understand, and it depends on you having the right scientific skills to be able to work productively with that theory - for example, making qualitative predictions about it without performing calculations. So understanding is contextual and depends on the skills of the understander.



There's more nuance to it, so like I said you should read the book, but this account of understanding distinguishes it from explanation itself, and distinguishes it from other accounts of understanding, which take understanding to be either a personal subjective sense - that feeling of something clicking in your mind - or simply the addition of more facts about something.



In this conversation, we revisit Henk's work on understanding, and how it touches on many other topics, like realism, the use of metaphors, how public understanding differs from expert understanding, idealization and abstraction in science, and so on.



And, because Henk's kind of understanding doesn't depend on subjective awareness or things being true, he and his cohorts have begun working on whether there could be a benchmark for degrees of understanding, to possibly assess whether AI demonstrates understanding, and to use as a common benchmark for humans and machines.




Google Scholar page



Social: @henkderegt.bsky.social;  



Book:

Understanding Scientific Understanding.





Related papers

Towards a benchmark for scientific understanding in humans and machines



Metaphors as tools for understanding in science communication among experts and to the public



Two scientific perspectives on nerve signal propagation: how incompatible approaches jointly promote progress in explanatory understanding






0:00 - Intro
10:13...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 225 Henk De Regt: Understanding in Machines and Humans]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p><a href="https://www.ru.nl/en/people/regt-h-de">Henk de Regt</a> is a professor of Philosophy of Science and the director of the <a href="https://www.ru.nl/en/isis">Institute for Science in Society</a> at Radboud University. Henk wrote the book on Understanding. Literally, he wrote what has become a classic in philosophy of science, <a href="https://amzn.to/4hj6k1X">Understanding Scientific Understanding</a>.</p>





<p>Henk's account of understanding goes roughly like this, but you can learn more in his book and other writings. To claim you understand something in science requires that you can produce a theory-based explanation of whatever you claim to understand, and it depends on you having the right scientific skills to be able to work productively with that theory - for example, making qualitative predictions about it without performing calculations. So understanding is contextual and depends on the skills of the understander.</p>



<p>There's more nuance to it, so like I said you should read the book, but this account of understanding distinguishes it from explanation itself, and distinguishes it from other accounts of understanding, which take understanding to be either a personal subjective sense - that feeling of something clicking in your mind - or simply the addition of more facts about something.</p>



<p>In this conversation, we revisit Henk's work on understanding, and how it touches on many other topics, like realism, the use of metaphors, how public understanding differs from expert understanding, idealization and abstraction in science, and so on.</p>



<p>And, because Henk's kind of understanding doesn't depend on subjective awareness or things being true, he and his cohorts have begun working on whether there could be a benchmark for degrees of understanding, to possibly assess whether AI demonstrates understanding, and to use as a common benchmark for humans and machines.</p>



<ul class="wp-block-list">
<li><a href="https://scholar.google.com/citations?view_op=list_works&amp;hl=en&amp;hl=en&amp;user=sBxqGrsAAAAJ&amp;sortby=pubdate">Google Scholar page</a></li>



<li>Social: <a href="https://bsky.app/profile/did:plc:jjoeezq5qw5ofmqtd5um7svi">@henkderegt.bsky.social</a>;  </li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/4hj6k1X">Understanding Scientific Understanding</a>.</li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://link.springer.com/article/10.1007/s11023-024-09657-1">Towards a benchmark for scientific understanding in humans and machines</a></li>



<li><a href="https://www.jbe-platform.com/content/journals/10.1075/msw.22016.sme">Metaphors as tools for understanding in science communication among experts and to the public</a></li>



<li><a href="https://link.springer.com/article/10.1007/s40656-024-00644-4">Two scientific perspectives on nerve signal propagation: how incompatible approaches jointly promote progress in explanatory understanding</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
10:13 - Philosophy of explanation vs understanding
14:32 - Different accounts of understanding
20:29 - Henk's account of understanding
26:47 - What counts as intelligible?
34:09 - Hodgkin and Huxley alternative
37:54 - Familiarity vs understanding
44:42 - Measuring understanding
1:02:53 - Machine understanding
1:16:39 - Non-factive understanding
1:23:34 - Abstraction vs understanding
1:31:07 - Public understanding of science
1:41:35 - Reflections on the book</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2230640/c1e-wmddc3p767u5xdzq-gp91jq4db017-ggkfly.mp3" length="100485065"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Henk de Regt is a professor of Philosophy of Science and the director of the Institute for Science in Society at Radboud University. Henk wrote the book on Understanding. Literally, he wrote what has become a classic in philosophy of science, Understanding Scientific Understanding.





Henk's account of understanding goes roughly like this, but you can learn more in his book and other writings. To claim you understand something in science requires that you can produce a theory-based explanation of whatever you claim to understand, and it depends on you having the right scientific skills to be able to work productively with that theory - for example, making qualitative predictions about it without performing calculations. So understanding is contextual and depends on the skills of the understander.



There's more nuance to it, so like I said you should read the book, but this account of understanding distinguishes it from explanation itself, and distinguishes it from other accounts of understanding, which take understanding to be either a personal subjective sense - that feeling of something clicking in your mind - or simply the addition of more facts about something.



In this conversation, we revisit Henk's work on understanding, and how it touches on many other topics, like realism, the use of metaphors, how public understanding differs from expert understanding, idealization and abstraction in science, and so on.



And, because Henk's kind of understanding doesn't depend on subjective awareness or things being true, he and his cohorts have begun working on whether there could be a benchmark for degrees of understanding, to possibly assess whether AI demonstrates understanding, and to use as a common benchmark for humans and machines.




Google Scholar page



Social: @henkderegt.bsky.social;  



Book:

Understanding Scientific Understanding.





Related papers

Towards a benchmark for scientific understanding in humans and machines



Metaphors as tools for understanding in science communication among experts and to the public



Two scientific perspectives on nerve signal propagation: how incompatible approaches jointly promote progress in explanatory understanding






0:00 - Intro
10:13...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:43:30</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 224 Dan Nicholson: Schrödinger's What is Life? Revisited]]>
                </title>
                <pubDate>Wed, 05 Nov 2025 10:39:13 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2193282</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-224-dan-nicholson-schrodingers-what-is-life-revisited</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>My guest today is Dan Nicholson, Assistant Professor of Philosophy at George Mason University, here to talk about his little book, <a href="https://www.cambridge.org/core/books/what-is-life-revisited/E6B3EA136720CF50C9480ADB8F41A6F4">What Is Life? Revisited</a>. Erwin Schrödinger's What Is Life is a famous book that people point to as having predicted DNA and influenced and inspired many well-known biologists ushering in the molecular biology revolution. But Schrödinger was a physicist, not a biologist, and he spent very little time and effort toward understanding biology.</p>



<p>What was he up to, why did he write this "famous little book"? Schrödinger had an agenda, a physics agenda. He wanted to save the older deterministic version of quantum physics from the new indeterministic version. When Dan was on the podcast a few years ago, we talked about the machine view of biological systems, how everything has become a "mechanism", and how that view fails to capture what modern science is actually telling us, that organisms are unlike machines in important ways. That work of Dan's led him down this path to Schrödinger's What Is Life, which he argues was a major contributor to that machine metaphor so ubiquitous today in biology. One of the reasons I'm interested in this kind of work is because the cognitive sciences, including neuroscience and artificial intelligence, inherited this mechanistic perspective, and swallowed it so hard that if you don't include the word "mechanism" in your research paper, you're vastly decreasing your chances of getting your work published, when in fact the mechanistic perspective is one super useful perspective among many.</p>



<ul class="wp-block-list">
<li><a href="https://philosophy.gmu.edu/people/dnicho">Dan’s website</a>. <a href="https://scholar.google.com/citations?hl=en&amp;user=5gxpRPYAAAAJ&amp;view_op=list_works&amp;sortby=pubdate">Google Scholar</a>.</li>



<li>Social: <a href="https://twitter.com/NicholsonHPBio">@NicholsonHPBio</a>; <a href="https://bsky.app/profile/djnicholson.bsky.social">@djnicholson.bsky.social</a></li>



<li><a href="https://www.dropbox.com/scl/fi/xe79f7jgks4rirpsv7t58/Nicholson-2025-What-Is-Life-Revisited.pdf?rlkey=4orqyk437gq7ne2y428jq2tlx&amp;dl=1" target="_blank" rel="noreferrer noopener">What Is Life? Revisited</a></li>



<li>Previous episode:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/150/">BI 150 Dan Nicholson: Machines, Organisms, Processes</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2025/11/BI-224-transcript-Nicholson.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
7:27 - Why Schrodinger wrote What is Life
15:13 - Aperiodic crystal and the meaning of code
21:39 - Order-from-order, order-from-disorder
28:32 - Appeal to authority
37:48 - Cell as machine
39:33 - Relation between DNA and organism (development)
44:44 - Negentropy
53:54 - Original contributions
58:54 - Mechanistic metaphor...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





My guest today is Dan Nicholson, Assistant Professor of Philosophy at George Mason University, here to talk about his little book, What Is Life? Revisited. Erwin Schrödinger's What Is Life is a famous book that people point to as having predicted DNA and influenced and inspired many well-known biologists ushering in the molecular biology revolution. But Schrödinger was a physicist, not a biologist, and he spent very little time and effort toward understanding biology.



What was he up to, why did he write this "famous little book"? Schrödinger had an agenda, a physics agenda. He wanted to save the older deterministic version of quantum physics from the new indeterministic version. When Dan was on the podcast a few years ago, we talked about the machine view of biological systems, how everything has become a "mechanism", and how that view fails to capture what modern science is actually telling us, that organisms are unlike machines in important ways. That work of Dan's led him down this path to Schrödinger's What Is Life, which he argues was a major contributor to that machine metaphor so ubiquitous today in biology. One of the reasons I'm interested in this kind of work is because the cognitive sciences, including neuroscience and artificial intelligence, inherited this mechanistic perspective, and swallowed it so hard that if you don't include the word "mechanism" in your research paper, you're vastly decreasing your chances of getting your work published, when in fact the mechanistic perspective is one super useful perspective among many.




Dan’s website. Google Scholar.



Social: @NicholsonHPBio; @djnicholson.bsky.social



What Is Life? Revisited



Previous episode:

BI 150 Dan Nicholson: Machines, Organisms, Processes






Read the transcript.



0:00 - Intro
7:27 - Why Schrodinger wrote What is Life
15:13 - Aperiodic crystal and the meaning of code
21:39 - Order-from-order, order-from-disorder
28:32 - Appeal to authority
37:48 - Cell as machine
39:33 - Relation between DNA and organism (development)
44:44 - Negentropy
53:54 - Original contributions
58:54 - Mechanistic metaphor...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 224 Dan Nicholson: Schrödinger's What is Life? Revisited]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>My guest today is Dan Nicholson, Assistant Professor of Philosophy at George Mason University, here to talk about his little book, <a href="https://www.cambridge.org/core/books/what-is-life-revisited/E6B3EA136720CF50C9480ADB8F41A6F4">What Is Life? Revisited</a>. Erwin Schrödinger's What Is Life is a famous book that people point to as having predicted DNA and influenced and inspired many well-known biologists ushering in the molecular biology revolution. But Schrödinger was a physicist, not a biologist, and he spent very little time and effort toward understanding biology.</p>



<p>What was he up to, why did he write this "famous little book"? Schrödinger had an agenda, a physics agenda. He wanted to save the older deterministic version of quantum physics from the new indeterministic version. When Dan was on the podcast a few years ago, we talked about the machine view of biological systems, how everything has become a "mechanism", and how that view fails to capture what modern science is actually telling us, that organisms are unlike machines in important ways. That work of Dan's led him down this path to Schrödinger's What Is Life, which he argues was a major contributor to that machine metaphor so ubiquitous today in biology. One of the reasons I'm interested in this kind of work is because the cognitive sciences, including neuroscience and artificial intelligence, inherited this mechanistic perspective, and swallowed it so hard that if you don't include the word "mechanism" in your research paper, you're vastly decreasing your chances of getting your work published, when in fact the mechanistic perspective is one super useful perspective among many.</p>



<ul class="wp-block-list">
<li><a href="https://philosophy.gmu.edu/people/dnicho">Dan’s website</a>. <a href="https://scholar.google.com/citations?hl=en&amp;user=5gxpRPYAAAAJ&amp;view_op=list_works&amp;sortby=pubdate">Google Scholar</a>.</li>



<li>Social: <a href="https://twitter.com/NicholsonHPBio">@NicholsonHPBio</a>; <a href="https://bsky.app/profile/djnicholson.bsky.social">@djnicholson.bsky.social</a></li>



<li><a href="https://www.dropbox.com/scl/fi/xe79f7jgks4rirpsv7t58/Nicholson-2025-What-Is-Life-Revisited.pdf?rlkey=4orqyk437gq7ne2y428jq2tlx&amp;dl=1" target="_blank" rel="noreferrer noopener">What Is Life? Revisited</a></li>



<li>Previous episode:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/150/">BI 150 Dan Nicholson: Machines, Organisms, Processes</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2025/11/BI-224-transcript-Nicholson.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
7:27 - Why Schrodinger wrote What is Life
15:13 - Aperiodic crystal and the meaning of code
21:39 - Order-from-order, order-from-disorder
28:32 - Appeal to authority
37:48 - Cell as machine
39:33 - Relation between DNA and organism (development)
44:44 - Negentropy
53:54 - Original contributions
58:54 - Mechanistic metaphor in neuroscience
1:16:05 - What's the lesson?
1:28:06 - Historical sleuthing
1:39:49 - Modern philosophy of biology</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2193282/c1e-2k22hmz6ppuv5vd8-5zdq8g4xun7p-kxnnls.mp3" length="105790417"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





My guest today is Dan Nicholson, Assistant Professor of Philosophy at George Mason University, here to talk about his little book, What Is Life? Revisited. Erwin Schrödinger's What Is Life is a famous book that people point to as having predicted DNA and influenced and inspired many well-known biologists ushering in the molecular biology revolution. But Schrödinger was a physicist, not a biologist, and he spent very little time and effort toward understanding biology.



What was he up to, why did he write this "famous little book"? Schrödinger had an agenda, a physics agenda. He wanted to save the older deterministic version of quantum physics from the new indeterministic version. When Dan was on the podcast a few years ago, we talked about the machine view of biological systems, how everything has become a "mechanism", and how that view fails to capture what modern science is actually telling us, that organisms are unlike machines in important ways. That work of Dan's led him down this path to Schrödinger's What Is Life, which he argues was a major contributor to that machine metaphor so ubiquitous today in biology. One of the reasons I'm interested in this kind of work is because the cognitive sciences, including neuroscience and artificial intelligence, inherited this mechanistic perspective, and swallowed it so hard that if you don't include the word "mechanism" in your research paper, you're vastly decreasing your chances of getting your work published, when in fact the mechanistic perspective is one super useful perspective among many.




Dan’s website. Google Scholar.



Social: @NicholsonHPBio; @djnicholson.bsky.social



What Is Life? Revisited



Previous episode:

BI 150 Dan Nicholson: Machines, Organisms, Processes






Read the transcript.



0:00 - Intro
7:27 - Why Schrodinger wrote What is Life
15:13 - Aperiodic crystal and the meaning of code
21:39 - Order-from-order, order-from-disorder
28:32 - Appeal to authority
37:48 - Cell as machine
39:33 - Relation between DNA and organism (development)
44:44 - Negentropy
53:54 - Original contributions
58:54 - Mechanistic metaphor...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:49:02</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 223 Vicente Raja: Ecological Psychology Motifs in Neuroscience]]>
                </title>
                <pubDate>Wed, 22 Oct 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2169679</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-223-vicente-raja-ecological-psychology-motifs-in-neuroscience</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Vicente Raja is a research fellow at University of Murcia in Spain, where he is also part of the Minimal Intelligence Lab run by Paco Calvo, where they study plant behavior, and he is external affiliate faculty of the Rotman Institute of Philosophy at Western University. He is a philosopher, and he is a cognitive scientist, and he specializes in applying concepts from ecological psychology to understand how brains, and organisms, including plants, get about in the world.</p>



<p>We talk about many facets of his research, both philosophical and scientific, and maybe the best way to describe the conversation is a tour among many of the concepts in ecological psychology - like affordances, ecological information, direct perception, and resonance, and how those concepts do and don't, and should or shouldn’t, contribute to our understanding of brains and minds.</p>



<p>We also discuss Vicente's use of the term motif to describe scientific concepts that allow different researchers to study roughly the same things even though they have different definitions for those things, and toward the end we touch on his work studying plant behavior.</p>





<ul class="wp-block-list">
<li><a href="https://www.um.es/mintlab/index.php/about/people/vicente-raja/">MINT Lab</a>.</li>



<li>Book: <a href="https://amzn.to/3VVBxOD" target="_blank" rel="noreferrer noopener">Ecological psychology</a></li>



<li>Social: <a href="https://bsky.app/profile/did:plc:cta6lto5xobzzbu67nohfcsk">@diovicen.bsky.social</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2206.04603">In search for an alternative to the computer metaphor of the mind and brain</a></li>



<li><a href="https://link.springer.com/article/10.1007/s11097-020-09711-0">Embodiment and cognitive neuroscience: the forgotten tales</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/pdfdirect/10.1111/ejn.16434">The motifs of radical embodied neuroscience</a></li>



<li><a href="https://www.nature.com/articles/s41598-020-76588-z">The Dynamics of Plant Nutation</a></li>



<li><a href="https://www.researchgate.net/publication/395300333_Ecological_Resonance_Is_Reflected_in_Human_Brain_Activity">Ecological Resonance Is Reflected in Human Brain Activity</a></li>



<li><a href="https://www.researchgate.net/publication/395502859_Affordances_are_for_life_and_not_just_for_maximizing_reproductive_fitness">Affordances are for life (and not just for maximizing reproductive fitness)</a></li>



<li><a href="https://www.researchgate.net/publication/382608399_Two_species_of_realism?_tp=eyJjb250ZXh0Ijp7ImZpcnN0UGFnZSI6InByb2ZpbGUiLCJwYWdlIjoicHJvZmlsZSJ9fQ">Two species of realism</a></li>
</ul>
</li>



<li>Lots of previous guests and topics mentioned:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/152/">BI 152 Michael L. Anderson: After Phrenology: Neural Reuse</a></li>



<li><a href="https://braininspired.co/podcast/190/">BI 190 Luis Favela: The Ecological Brain</a></li>



<li><a href="https://braininspired.co/podcast/191/">BI 191 Damian Kelty-Stephen: Fractal Turbulent Cascading Intelligence</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/10/BI-223-transcript-Raja.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
4:55 - Affordances and neuroscience
13:46 - Motifs
39:41 - Reconciling neuroscience and ecological psychology
1:07:55 - Predictive processing
1:15:32 - Resonance
1:23:00 - Biggest holes in ecological psychology
1:29:50 - Plant cognition</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Vicente Raja is a research fellow at University of Murcia in Spain, where he is also part of the Minimal Intelligence Lab run by Paco Calvo, where they study plant behavior, and he is external affiliate faculty of the Rotman Institute of Philosophy at Western University. He is a philosopher, and he is a cognitive scientist, and he specializes in applying concepts from ecological psychology to understand how brains, and organisms, including plants, get about in the world.



We talk about many facets of his research, both philosophical and scientific, and maybe the best way to describe the conversation is a tour among many of the concepts in ecological psychology - like affordances, ecological information, direct perception, and resonance, and how those concepts do and don't, and should or shouldn’t, contribute to our understanding of brains and minds.



We also discuss Vicente's use of the term motif to describe scientific concepts that allow different researchers to study roughly the same things even though they have different definitions for those things, and toward the end we touch on his work studying plant behavior.






MINT Lab.



Book: Ecological psychology



Social: @diovicen.bsky.social



Related papers

In search for an alternative to the computer metaphor of the mind and brain



Embodiment and cognitive neuroscience: the forgotten tales.



The motifs of radical embodied neuroscience



The Dynamics of Plant Nutation



Ecological Resonance Is Reflected in Human Brain Activity



Affordances are for life (and not just for maximizing reproductive fitness)



Two species of realism





Lots of previous guests and topics mentioned:

BI 152 Michael L. Anderson: After Phrenology: Neural Reuse



BI 190 Luis Favela: The Ecological Brain



BI 191 Damian Kelty-Stephen: Fractal Turbulent Cascading Intelligence






Read the transcript.



0:00 - Intro
4:55 - Affordances and neuroscience
13:46 - Motifs
39:41 - Reconciling neuroscience and ecological psychology
1:07:55 - Predictive processing
1:15:32 - Resonance
1:23:00 - Biggest holes in ecological psychology
1:29:50 - Plant cognition]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 223 Vicente Raja: Ecological Psychology Motifs in Neuroscience]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Vicente Raja is a research fellow at University of Murcia in Spain, where he is also part of the Minimal Intelligence Lab run by Paco Calvo, where they study plant behavior, and he is external affiliate faculty of the Rotman Institute of Philosophy at Western University. He is a philosopher, and he is a cognitive scientist, and he specializes in applying concepts from ecological psychology to understand how brains, and organisms, including plants, get about in the world.</p>



<p>We talk about many facets of his research, both philosophical and scientific, and maybe the best way to describe the conversation is a tour among many of the concepts in ecological psychology - like affordances, ecological information, direct perception, and resonance, and how those concepts do and don't, and should or shouldn’t, contribute to our understanding of brains and minds.</p>



<p>We also discuss Vicente's use of the term motif to describe scientific concepts that allow different researchers to study roughly the same things even though they have different definitions for those things, and toward the end we touch on his work studying plant behavior.</p>





<ul class="wp-block-list">
<li><a href="https://www.um.es/mintlab/index.php/about/people/vicente-raja/">MINT Lab</a>.</li>



<li>Book: <a href="https://amzn.to/3VVBxOD" target="_blank" rel="noreferrer noopener">Ecological psychology</a></li>



<li>Social: <a href="https://bsky.app/profile/did:plc:cta6lto5xobzzbu67nohfcsk">@diovicen.bsky.social</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2206.04603">In search for an alternative to the computer metaphor of the mind and brain</a></li>



<li><a href="https://link.springer.com/article/10.1007/s11097-020-09711-0">Embodiment and cognitive neuroscience: the forgotten tales</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/pdfdirect/10.1111/ejn.16434">The motifs of radical embodied neuroscience</a></li>



<li><a href="https://www.nature.com/articles/s41598-020-76588-z">The Dynamics of Plant Nutation</a></li>



<li><a href="https://www.researchgate.net/publication/395300333_Ecological_Resonance_Is_Reflected_in_Human_Brain_Activity">Ecological Resonance Is Reflected in Human Brain Activity</a></li>



<li><a href="https://www.researchgate.net/publication/395502859_Affordances_are_for_life_and_not_just_for_maximizing_reproductive_fitness">Affordances are for life (and not just for maximizing reproductive fitness)</a></li>



<li><a href="https://www.researchgate.net/publication/382608399_Two_species_of_realism?_tp=eyJjb250ZXh0Ijp7ImZpcnN0UGFnZSI6InByb2ZpbGUiLCJwYWdlIjoicHJvZmlsZSJ9fQ">Two species of realism</a></li>
</ul>
</li>



<li>Lots of previous guests and topics mentioned:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/152/">BI 152 Michael L. Anderson: After Phrenology: Neural Reuse</a></li>



<li><a href="https://braininspired.co/podcast/190/">BI 190 Luis Favela: The Ecological Brain</a></li>



<li><a href="https://braininspired.co/podcast/191/">BI 191 Damian Kelty-Stephen: Fractal Turbulent Cascading Intelligence</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/10/BI-223-transcript-Raja.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
4:55 - Affordances and neuroscience
13:46 - Motifs
39:41 - Reconciling neuroscience and ecological psychology
1:07:55 - Predictive processing
1:15:32 - Resonance
1:23:00 - Biggest holes in ecological psychology
1:29:50 - Plant cognition</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2169679/c1e-jjrra5k9x4fzo8vr-jpn32jv8h7ov-zirxah.mp3" length="95908136"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Vicente Raja is a research fellow at University of Murcia in Spain, where he is also part of the Minimal Intelligence Lab run by Paco Calvo, where they study plant behavior, and he is external affiliate faculty of the Rotman Institute of Philosophy at Western University. He is a philosopher, and he is a cognitive scientist, and he specializes in applying concepts from ecological psychology to understand how brains, and organisms, including plants, get about in the world.



We talk about many facets of his research, both philosophical and scientific, and maybe the best way to describe the conversation is a tour among many of the concepts in ecological psychology - like affordances, ecological information, direct perception, and resonance, and how those concepts do and don't, and should or shouldn’t, contribute to our understanding of brains and minds.



We also discuss Vicente's use of the term motif to describe scientific concepts that allow different researchers to study roughly the same things even though they have different definitions for those things, and toward the end we touch on his work studying plant behavior.






MINT Lab.



Book: Ecological psychology



Social: @diovicen.bsky.social



Related papers

In search for an alternative to the computer metaphor of the mind and brain



Embodiment and cognitive neuroscience: the forgotten tales.



The motifs of radical embodied neuroscience



The Dynamics of Plant Nutation



Ecological Resonance Is Reflected in Human Brain Activity



Affordances are for life (and not just for maximizing reproductive fitness)



Two species of realism





Lots of previous guests and topics mentioned:

BI 152 Michael L. Anderson: After Phrenology: Neural Reuse



BI 190 Luis Favela: The Ecological Brain



BI 191 Damian Kelty-Stephen: Fractal Turbulent Cascading Intelligence






Read the transcript.



0:00 - Intro
4:55 - Affordances and neuroscience
13:46 - Motifs
39:41 - Reconciling neuroscience and ecological psychology
1:07:55 - Predictive processing
1:15:32 - Resonance
1:23:00 - Biggest holes in ecological psychology
1:29:50 - Plant cognition]]>
                </itunes:summary>
                                                                            <itunes:duration>01:39:01</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 222 Nikolay Kukushkin: Minds and Meaning from Nature's Ideas]]>
                </title>
                <pubDate>Wed, 08 Oct 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2160665</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-222-nikolay-kukushkin-minds-and-meaning-from-natures-ideas</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Nikolay Kukushkin is an associate professor at New York University, and a senior scientist at Thomas Carew’s laboratory at the Center for Neural Science. He describes himself as a "molecular philosopher", owing to his day job as a molecular biologist and his broad perspective on how it "hangs together", in the words of Wilfrid Sellars, who in 1962 wrote, “The aim of philosophy, abstractly formulated, is to understand how things in the broadest possible sense of the term hang together in the broadest possible sense of the term”.</p>



<p>That is what Niko does in his book <a href="https://amzn.to/46WT6UO">One Hand Clapping: Unraveling the Mystery of the Human Mind</a>.</p>



<p>This book is about essences across spatial scales in nature. More precisely, it's about giving names to what is fundamental, or essential, to how things and processes function in nature. Niko argues those essences are where meaning resides. That's very abstract, and we'll spell it out more during the discussion. But as an example at the small scale, the essences of carbon and oxygen, respectively, are creation and destruction, which allows metabolism to occur in biological organisms. Moving way up the scale, following this essence perspective leads Niko to the conclusion that there is no separation between our minds and the world, and that instead we should embrace the relational aspect of mind and world as a unifying principle. On the way, via evolution, we discuss many more examples, plus some of his own work studying how memory works in individual cells, not just neurons or populations of neurons in brains.</p>



<ul class="wp-block-list">
<li><a href="https://www.nikolaykukushkin.com/about">Niko's website</a>.</li>



<li>Twitter: <a href="https://x.com/niko_kukushkin">@niko_kukushkin</a>.</li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/46WT6UO">One Hand Clapping: Unraveling the Mystery of the Human Mind</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2025/10/BI-222-transcript-kukushkin.pdf" target="_blank" rel="noreferrer noopener">Read the transcript</a>.</p>



<p>0:00 - Intro
9:28 - Studying memory in cells
10:14 - Who the book is for
17:57 - Studying memory in cells
21:53 - What is memory?
29:49 - Book
29:52 - How the book came about
37:56 - Central message of the book
44:07 - Meaning in nature
49:09 - Meaning and essence
51:55 - Multicellularity and ant colonies
57:43 - Eukaryotes and complexification
1:03:38 - Why do we have brains?
1:06:17 - Emergence
1:10:58 - Language
1:12:41 - Human evolution
1:14:41 - Artificial intelligence, meaning and essences
1:25:49 - Consciousness</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Nikolay Kukushkin is an associate professor at New York University, and a senior scientist at Thomas Carew’s laboratory at the Center for Neural Science. He describes himself as a "molecular philosopher", owing to his day job as a molecular biologist and his broad perspective on how it "hangs together", in the words of Wilfrid Sellars, who in 1962 wrote, “The aim of philosophy, abstractly formulated, is to understand how things in the broadest possible sense of the term hang together in the broadest possible sense of the term”.



That is what Niko does in his book One Hand Clapping: Unraveling the Mystery of the Human Mind.



This book is about essences across spatial scales in nature. More precisely, it's about giving names to what is fundamental, or essential, to how things and processes function in nature. Niko argues those essences are where meaning resides. That's very abstract, and we'll spell it out more during the discussion. But as an example at the small scale, the essences of carbon and oxygen, respectively, are creation and destruction, which allows metabolism to occur in biological organisms. Moving way up the scale, following this essence perspective leads Niko to the conclusion that there is no separation between our minds and the world, and that instead we should embrace the relational aspect of mind and world as a unifying principle. On the way, via evolution, we discuss many more examples, plus some of his own work studying how memory works in individual cells, not just neurons or populations of neurons in brains.




Niko's website.



Twitter: @niko_kukushkin.



Book:

One Hand Clapping: Unraveling the Mystery of the Human Mind






Read the transcript.



0:00 - Intro
9:28 - Studying memory in cells
10:14 - Who the book is for
17:57 - Studying memory in cells
21:53 - What is memory?
29:49 - Book
29:52 - How the book came about
37:56 - Central message of the book
44:07 - Meaning in nature
49:09 - Meaning and essence
51:55 - Multicellularity and ant colonies
57:43 - Eukaryotes and complexification
1:03:38 - Why do we have brains?
1:06:17 - Emergence
1:10:58 - Language
1:12:41 - Human evolution
1:14:41 - Artificial intelligence, meaning and essences
1:25:49 - Consciousness]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 222 Nikolay Kukushkin: Minds and Meaning from Nature's Ideas]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Nikolay Kukushkin is an associate professor at New York University, and a senior scientist at Thomas Carew’s laboratory at the Center for Neural Science. He describes himself as a "molecular philosopher", owing to his day job as a molecular biologist and his broad perspective on how it "hangs together", in the words of Wilfrid Sellars, who in 1962 wrote, “The aim of philosophy, abstractly formulated, is to understand how things in the broadest possible sense of the term hang together in the broadest possible sense of the term”.</p>



<p>That is what Niko does in his book <a href="https://amzn.to/46WT6UO">One Hand Clapping: Unraveling the Mystery of the Human Mind</a>.</p>



<p>This book is about essences across spatial scales in nature. More precisely, it's about giving names to what is fundamental, or essential, to how things and processes function in nature. Niko argues those essences are where meaning resides. That's very abstract, and we'll spell it out more during the discussion. But as an example at the small scale, the essences of carbon and oxygen, respectively, are creation and destruction, which allows metabolism to occur in biological organisms. Moving way up the scale, following this essence perspective leads Niko to the conclusion that there is no separation between our minds and the world, and that instead we should embrace the relational aspect of mind and world as a unifying principle. On the way, via evolution, we discuss many more examples, plus some of his own work studying how memory works in individual cells, not just neurons or populations of neurons in brains.</p>



<ul class="wp-block-list">
<li><a href="https://www.nikolaykukushkin.com/about">Niko's website</a>.</li>



<li>Twitter: <a href="https://x.com/niko_kukushkin">@niko_kukushkin</a>.</li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/46WT6UO">One Hand Clapping: Unraveling the Mystery of the Human Mind</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2025/10/BI-222-transcript-kukushkin.pdf" target="_blank" rel="noreferrer noopener">Read the transcript</a>.</p>



<p>0:00 - Intro
9:28 - Studying memory in cells
10:14 - Who the book is for
17:57 - Studying memory in cells
21:53 - What is memory?
29:49 - Book
29:52 - How the book came about
37:56 - Central message of the book
44:07 - Meaning in nature
49:09 - Meaning and essence
51:55 - Multicellularity and ant colonies
57:43 - Eukaryotes and complexification
1:03:38 - Why do we have brains?
1:06:17 - Emergence
1:10:58 - Language
1:12:41 - Human evolution
1:14:41 - Artificial intelligence, meaning and essences
1:25:49 - Consciousness</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2160665/c1e-d5wwam4ovzi5gn7j-dmx6v0vqizx-c8plfu.mp3" length="86248657"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Nikolay Kukushkin is an associate professor at New York University, and a senior scientist at Thomas Carew’s laboratory at the Center for Neural Science. He describes himself as a "molecular philosopher", owing to his day job as a molecular biologist and his broad perspective on how it "hangs together", in the words of Wilfrid Sellars, who in 1962 wrote, “The aim of philosophy, abstractly formulated, is to understand how things in the broadest possible sense of the term hang together in the broadest possible sense of the term”.



That is what Niko does in his book One Hand Clapping: Unraveling the Mystery of the Human Mind.



This book is about essences across spatial scales in nature. More precisely, it's about giving names to what is fundamental, or essential, to how things and processes function in nature. Niko argues those essences are where meaning resides. That's very abstract, and we'll spell it out more during the discussion. But as an example at the small scale, the essences of carbon and oxygen, respectively, are creation and destruction, which allows metabolism to occur in biological organisms. Moving way up the scale, following this essence perspective leads Niko to the conclusion that there is no separation between our minds and the world, and that instead we should embrace the relational aspect of mind and world as a unifying principle. On the way, via evolution, we discuss many more examples, plus some of his own work studying how memory works in individual cells, not just neurons or populations of neurons in brains.




Niko's website.



Twitter: @niko_kukushkin.



Book:

One Hand Clapping: Unraveling the Mystery of the Human Mind






Read the transcript.



0:00 - Intro
9:28 - Studying memory in cells
10:14 - Who the book is for
17:57 - Studying memory in cells
21:53 - What is memory?
29:49 - Book
29:52 - How the book came about
37:56 - Central message of the book
44:07 - Meaning in nature
49:09 - Meaning and essence
51:55 - Multicellularity and ant colonies
57:43 - Eukaryotes and complexification
1:03:38 - Why do we have brains?
1:06:17 - Emergence
1:10:58 - Language
1:12:41 - Human evolution
1:14:41 - Artificial intelligence, meaning and essences
1:25:49 - Consciousness]]>
                </itunes:summary>
                                                                            <itunes:duration>01:28:26</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 221 Ann Kennedy: Theory Beneath the Cortical Surface]]>
                </title>
                <pubDate>Wed, 24 Sep 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2147979</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-221-ann-kennedy-theory-beneath-the-cortical-surface</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Ann Kennedy is Associate Professor at Scripps Research Institute and runs the <a href="https://www.kennedylab.org/">Laboratory for Theoretical Neuroscience and Behavior</a>.</p>



<p>Among other things, Ann has been studying how processes important in life, like survival, threat response, motivation, and pain, are mediated through subcortical brain areas like the hypothalamus. She also pays attention to the time course those life processes require, which has led her to consider how the expression of things like proteins help shape neural processes throughout the brain, so we can behave appropriately in those different contexts.</p>



<p>You'll hear us talk about how this is still a pretty open field in theoretical neuroscience, unlike the historically heavy use of theory in popular brain areas throughout the cortex, and the historically narrow focus on spikes or action potentials as the only game in town when it comes to neural computation. We discuss that and I link in the show notes to a commentary piece Ann wrote, in which she argues for both top-down and bottom-up theoretical approaches.</p>



<p>I also link to her papers about the early evolution of nervous systems, how heterogeneity or diversity of neurons is an advantage for neural computations, and we discuss a Kaggle competition she developed to benchmark automated behavioral labels of behaving organisms, so that despite different researchers using different recording systems and setups, analyzing those data will produce consistent labels to better compare across labs and aggregate bigger and better data sets.</p>



<ul class="wp-block-list">
<li><a href="https://www.kennedylab.org/">Laboratory for Theoretical Neuroscience and Behavior</a>.</li>



<li>Social:
<ul class="wp-block-list">
<li><a href="https://bsky.app/profile/antihebbiann.bsky.social">@antihebbiann.bsky.social</a></li>



<li><a href="https://x.com/Antihebbiann">@Antihebbiann</a> </li>
</ul>
</li>



<li>The <a href="https://www.kaggle.com/competitions/MABe-mouse-behavior-detection">Kaggle competition Ann developed</a> to generalize behavior categorization.</li>



<li>Related papers<ul><li><a href="https://www.kennedylab.org/_files/ugd/680470_913f9d06566941a1bb03600464888e76.pdf">Dynamics of neural activity in early nervous system evolution</a>.</li><li><a href="https://www.nature.com/articles/s41583-025-00965-8">Theoretical neuroscience has room to grow</a>.</li></ul>
<ul class="wp-block-list">
<li><a href="https://www.pnas.org/doi/epub/10.1073/pnas.2311885121">Neural heterogeneity controls computations in spiking neural networks</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/2024.02.26.582069v1.full.pdf">A parabrachial hub for the prioritization of survival behavior</a>.</li>



<li><a href="https://www.cell.com/cell/fulltext/S0092-8674(22)01471-4">An approximate line attractor in the hypothalamus encodes an aggressive state</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/09/BI-221-transcript-kennedy.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:36 - Why study subcortical areas?
13:30 - Evolution
15:06 - Dynamical systems and time scales
21:32 - NeuroAI
28:37 - Before there were brains
33:11 - Endogenous spontaneous activity
40:09 - Natural vs artificial
43:09 - Different is more - heterogeneity
45:32 - Neuromodulators and neuropeptide functions
55:47 - Heterogeneity: manifolds, subspaces, and gain
1:02:43 - Control knobs
1:09:45 - Theoretical neuroscience has room to grow
1:19:59 - Hypothalamus
1:20:57 - Subcortical vs "higher" cognition
1:24:53 - 4E cognition
1:26:56 - Behavior benchmarking
1:37:26 - Current challenges
1:39:...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Ann Kennedy is Associate Professor at Scripps Research Institute and runs the Laboratory for Theoretical Neuroscience and Behavior.



Among other things, Ann has been studying how processes important in life, like survival, threat response, motivation, and pain, are mediated through subcortical brain areas like the hypothalamus. She also pays attention to the time course those life processes require, which has led her to consider how the expression of things like proteins help shape neural processes throughout the brain, so we can behave appropriately in those different contexts.



You'll hear us talk about how this is still a pretty open field in theoretical neuroscience, unlike the historically heavy use of theory in popular brain areas throughout the cortex, and the historically narrow focus on spikes or action potentials as the only game in town when it comes to neural computation. We discuss that and I link in the show notes to a commentary piece Ann wrote, in which she argues for both top-down and bottom-up theoretical approaches.



I also link to her papers about the early evolution of nervous systems, how heterogeneity or diversity of neurons is an advantage for neural computations, and we discuss a Kaggle competition she developed to benchmark automated behavioral labels of behaving organisms, so that despite different researchers using different recording systems and setups, analyzing those data will produce consistent labels to better compare across labs and aggregate bigger and better data sets.




Laboratory for Theoretical Neuroscience and Behavior.



Social:

@antihebbiann.bsky.social



@Antihebbiann 





The Kaggle competition Ann developed to generalize behavior categorization.



Related papersDynamics of neural activity in early nervous system evolution.Theoretical neuroscience has room to grow.

Neural heterogeneity controls computations in spiking neural networks.



A parabrachial hub for the prioritization of survival behavior.



An approximate line attractor in the hypothalamus encodes an aggressive state.






Read the transcript.



0:00 - Intro
3:36 - Why study subcortical areas?
13:30 - Evolution
15:06 - Dynamical systems and time scales
21:32 - NeuroAI
28:37 - Before there were brains
33:11 - Endogenous spontaneous activity
40:09 - Natural vs artificial
43:09 - Different is more - heterogeneity
45:32 - Neuromodulators and neuropeptide functions
55:47 - Heterogeneity: manifolds, subspaces, and gain
1:02:43 - Control knobs
1:09:45 - Theoretical neuroscience has room to grow
1:19:59 - Hypothalamus
1:20:57 - Subcortical vs "higher" cognition
1:24:53 - 4E cognition
1:26:56 - Behavior benchmarking
1:37:26 - Current challenges
1:39:...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 221 Ann Kennedy: Theory Beneath the Cortical Surface]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Ann Kennedy is Associate Professor at Scripps Research Institute and runs the <a href="https://www.kennedylab.org/">Laboratory for Theoretical Neuroscience and Behavior</a>.</p>



<p>Among other things, Ann has been studying how processes important in life, like survival, threat response, motivation, and pain, are mediated through subcortical brain areas like the hypothalamus. She also pays attention to the time course those life processes require, which has led her to consider how the expression of things like proteins help shape neural processes throughout the brain, so we can behave appropriately in those different contexts.</p>



<p>You'll hear us talk about how this is still a pretty open field in theoretical neuroscience, unlike the historically heavy use of theory in popular brain areas throughout the cortex, and the historically narrow focus on spikes or action potentials as the only game in town when it comes to neural computation. We discuss that and I link in the show notes to a commentary piece Ann wrote, in which she argues for both top-down and bottom-up theoretical approaches.</p>



<p>I also link to her papers about the early evolution of nervous systems, how heterogeneity or diversity of neurons is an advantage for neural computations, and we discuss a Kaggle competition she developed to benchmark automated behavioral labels of behaving organisms, so that despite different researchers using different recording systems and setups, analyzing those data will produce consistent labels to better compare across labs and aggregate bigger and better data sets.</p>



<ul class="wp-block-list">
<li><a href="https://www.kennedylab.org/">Laboratory for Theoretical Neuroscience and Behavior</a>.</li>



<li>Social:
<ul class="wp-block-list">
<li><a href="https://bsky.app/profile/antihebbiann.bsky.social">@antihebbiann.bsky.social</a></li>



<li><a href="https://x.com/Antihebbiann">@Antihebbiann</a> </li>
</ul>
</li>



<li>The <a href="https://www.kaggle.com/competitions/MABe-mouse-behavior-detection">Kaggle competition Ann developed</a> to generalize behavior categorization.</li>



<li>Related papers<ul><li><a href="https://www.kennedylab.org/_files/ugd/680470_913f9d06566941a1bb03600464888e76.pdf">Dynamics of neural activity in early nervous system evolution</a>.</li><li><a href="https://www.nature.com/articles/s41583-025-00965-8">Theoretical neuroscience has room to grow</a>.</li></ul>
<ul class="wp-block-list">
<li><a href="https://www.pnas.org/doi/epub/10.1073/pnas.2311885121">Neural heterogeneity controls computations in spiking neural networks</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/2024.02.26.582069v1.full.pdf">A parabrachial hub for the prioritization of survival behavior</a>.</li>



<li><a href="https://www.cell.com/cell/fulltext/S0092-8674(22)01471-4">An approximate line attractor in the hypothalamus encodes an aggressive state</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/09/BI-221-transcript-kennedy.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:36 - Why study subcortical areas?
13:30 - Evolution
15:06 - Dynamical systems and time scales
21:32 - NeuroAI
28:37 - Before there were brains
33:11 - Endogenous spontaneous activity
40:09 - Natural vs artificial
43:09 - Different is more - heterogeneity
45:32 - Neuromodulators and neuropeptide functions
55:47 - Heterogeneity: manifolds, subspaces, and gain
1:02:43 - Control knobs
1:09:45 - Theoretical neuroscience has room to grow
1:19:59 - Hypothalamus
1:20:57 - Subcortical vs "higher" cognition
1:24:53 - 4E cognition
1:26:56 - Behavior benchmarking
1:37:26 - Current challenges
1:39:46 - Advice to young researchers</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2147979/c1e-jjrra5j711uo0346-mkjmdzk7s7dr-qjcekt.mp3" length="100934524"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Ann Kennedy is Associate Professor at Scripps Research Institute and runs the Laboratory for Theoretical Neuroscience and Behavior.



Among other things, Ann has been studying how processes important in life, like survival, threat response, motivation, and pain, are mediated through subcortical brain areas like the hypothalamus. She also pays attention to the time course those life processes require, which has led her to consider how the expression of things like proteins help shape neural processes throughout the brain, so we can behave appropriately in those different contexts.



You'll hear us talk about how this is still a pretty open field in theoretical neuroscience, unlike the historically heavy use of theory in popular brain areas throughout the cortex, and the historically narrow focus on spikes or action potentials as the only game in town when it comes to neural computation. We discuss that and I link in the show notes to a commentary piece Ann wrote, in which she argues for both top-down and bottom-up theoretical approaches.



I also link to her papers about the early evolution of nervous systems, how heterogeneity or diversity of neurons is an advantage for neural computations, and we discuss a Kaggle competition she developed to benchmark automated behavioral labels of behaving organisms, so that despite different researchers using different recording systems and setups, analyzing those data will produce consistent labels to better compare across labs and aggregate bigger and better data sets.




Laboratory for Theoretical Neuroscience and Behavior.



Social:

@antihebbiann.bsky.social



@Antihebbiann 





The Kaggle competition Ann developed to generalize behavior categorization.



Related papersDynamics of neural activity in early nervous system evolution.Theoretical neuroscience has room to grow.

Neural heterogeneity controls computations in spiking neural networks.



A parabrachial hub for the prioritization of survival behavior.



An approximate line attractor in the hypothalamus encodes an aggressive state.






Read the transcript.



0:00 - Intro
3:36 - Why study subcortical areas?
13:30 - Evolution
15:06 - Dynamical systems and time scales
21:32 - NeuroAI
28:37 - Before there were brains
33:11 - Endogenous spontaneous activity
40:09 - Natural vs artificial
43:09 - Different is more - heterogeneity
45:32 - Neuromodulators and neuropeptide functions
55:47 - Heterogeneity: manifolds, subspaces, and gain
1:02:43 - Control knobs
1:09:45 - Theoretical neuroscience has room to grow
1:19:59 - Hypothalamus
1:20:57 - Subcortical vs "higher" cognition
1:24:53 - 4E cognition
1:26:56 - Behavior benchmarking
1:37:26 - Current challenges
1:39:...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:43:37</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 220 Michael Breakspear and Mac Shine: Dynamic Systems from Neurons to Brains]]>
                </title>
                <pubDate>Wed, 10 Sep 2025 04:01:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2137222</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-220-michael-breakspear-and-mac-shine-dynamic-systems-from-neurons-to-brains</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>: <a href="https://www.thetransmitter.org/partners/">https://www.thetransmitter.org/partners/</a></p>



<p>Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: <a href="https://www.thetransmitter.org/newsletters/">https://www.thetransmitter.org/newsletters/</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>What changes and what stays the same as you scale from single neurons up to local populations of neurons up to whole brains? How tuning parameters like the gain in some neural populations affects the dynamical and computational properties of the rest of the system.</p>



<p>Those are the main questions my guests today discuss. Michael Breakspear is a professor of Systems Neuroscience and runs the <a href="https://www.systemsneurosciencegroup.com/team">Systems Neuroscience Group</a> at the University of Newcastle in Australia. Mac Shine is back, he was here a few years ago. Mac runs the <a href="https://shine-lab.org/">Shine Lab</a> at the University of Sydney in Australia.</p>



<p>Michael and Mac have been collaborating on the questions I mentioned above, using a systems approach to studying brains and cognition. The short summary of what they discovered in their first collaboration is that turning up or down the gain across broad networks of neurons in the brain affects integration - working together - and segregation - working apart. They map this gain modulation on to the ascending arousal pathway, in which the locus coeruleus projects widely throughout the brain distributing noradrenaline. At a certain sweet spot of gain, integration and segregation are balanced near a bifurcation point, near criticality, which maximizes properties that are good for cognition.</p>



<p>In their recent collaboration, they used a coarse graining procedure inspired by physics to study the collective dynamics of various sizes of neural populations, going from single neurons to large populations of neurons. Here they found that despite different coding properties at different scales, there are also scale-free properties that suggest neural populations of all sizes, from single neurons to brains, can do cognitive stuff useful for the organism. And they found this is a conserved property across many different species, suggesting it's a universal principle of brain dynamics in general.</p>



<p>So we discuss all that, but to get there we talk about what a systems approach to neuroscience is, how systems neuroscience has changed over the years, and how it has inspired the questions Michael and Mac ask.</p>



<ul class="wp-block-list">
<li>Breakspear: <a href="https://www.systemsneurosciencegroup.com/team">Systems Neuroscience Group</a>.
<ul class="wp-block-list">
<li><a href="https://x.com/DrBreaky">@DrBreaky</a>.</li>
</ul>
</li>



<li>Shine: <a href="https://shine-lab.org/">Shine Lab</a>.
<ul class="wp-block-list">
<li><a href="https://x.com/jmacshine">@jmacshine</a>.</li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.ngds-ku.org/Papers/J61/Breakspear.pdf">Dynamic models of large-scale brain activity</a></li>



<li><a href="https://www.nature.com/articles/s41467-019-08999-0">Metastable brain waves</a></li>



<li>...</li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership: https://www.thetransmitter.org/partners/



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: https://www.thetransmitter.org/newsletters/



To explore more neuroscience news and perspectives, visit thetransmitter.org.



What changes and what stays the same as you scale from single neurons up to local populations of neurons up to whole brains? How tuning parameters like the gain in some neural populations affects the dynamical and computational properties of the rest of the system.



Those are the main questions my guests today discuss. Michael Breakspear is a professor of Systems Neuroscience and runs the Systems Neuroscience Group at the University of Newcastle in Australia. Mac Shine is back, he was here a few years ago. Mac runs the Shine Lab at the University of Sydney in Australia.



Michael and Mac have been collaborating on the questions I mentioned above, using a systems approach to studying brains and cognition. The short summary of what they discovered in their first collaboration is that turning up or down the gain across broad networks of neurons in the brain affects integration - working together - and segregation - working apart. They map this gain modulation on to the ascending arousal pathway, in which the locus coeruleus projects widely throughout the brain distributing noradrenaline. At a certain sweet spot of gain, integration and segregation are balanced near a bifurcation point, near criticality, which maximizes properties that are good for cognition.



In their recent collaboration, they used a coarse graining procedure inspired by physics to study the collective dynamics of various sizes of neural populations, going from single neurons to large populations of neurons. Here they found that despite different coding properties at different scales, there are also scale-free properties that suggest neural populations of all sizes, from single neurons to brains, can do cognitive stuff useful for the organism. And they found this is a conserved property across many different species, suggesting it's a universal principle of brain dynamics in general.



So we discuss all that, but to get there we talk about what a systems approach to neuroscience is, how systems neuroscience has changed over the years, and how it has inspired the questions Michael and Mac ask.




Breakspear: Systems Neuroscience Group.

@DrBreaky.





Shine: Shine Lab.

@jmacshine.





Related papers

Dynamic models of large-scale brain activity



Metastable brain waves



...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 220 Michael Breakspear and Mac Shine: Dynamic Systems from Neurons to Brains]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>: <a href="https://www.thetransmitter.org/partners/">https://www.thetransmitter.org/partners/</a></p>



<p>Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: <a href="https://www.thetransmitter.org/newsletters/">https://www.thetransmitter.org/newsletters/</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>What changes and what stays the same as you scale from single neurons up to local populations of neurons up to whole brains? How tuning parameters like the gain in some neural populations affects the dynamical and computational properties of the rest of the system.</p>



<p>Those are the main questions my guests today discuss. Michael Breakspear is a professor of Systems Neuroscience and runs the <a href="https://www.systemsneurosciencegroup.com/team">Systems Neuroscience Group</a> at the University of Newcastle in Australia. Mac Shine is back, he was here a few years ago. Mac runs the <a href="https://shine-lab.org/">Shine Lab</a> at the University of Sydney in Australia.</p>



<p>Michael and Mac have been collaborating on the questions I mentioned above, using a systems approach to studying brains and cognition. The short summary of what they discovered in their first collaboration is that turning up or down the gain across broad networks of neurons in the brain affects integration - working together - and segregation - working apart. They map this gain modulation on to the ascending arousal pathway, in which the locus coeruleus projects widely throughout the brain distributing noradrenaline. At a certain sweet spot of gain, integration and segregation are balanced near a bifurcation point, near criticality, which maximizes properties that are good for cognition.</p>



<p>In their recent collaboration, they used a coarse graining procedure inspired by physics to study the collective dynamics of various sizes of neural populations, going from single neurons to large populations of neurons. Here they found that despite different coding properties at different scales, there are also scale-free properties that suggest neural populations of all sizes, from single neurons to brains, can do cognitive stuff useful for the organism. And they found this is a conserved property across many different species, suggesting it's a universal principle of brain dynamics in general.</p>



<p>So we discuss all that, but to get there we talk about what a systems approach to neuroscience is, how systems neuroscience has changed over the years, and how it has inspired the questions Michael and Mac ask.</p>



<ul class="wp-block-list">
<li>Breakspear: <a href="https://www.systemsneurosciencegroup.com/team">Systems Neuroscience Group</a>.
<ul class="wp-block-list">
<li><a href="https://x.com/DrBreaky">@DrBreaky</a>.</li>
</ul>
</li>



<li>Shine: <a href="https://shine-lab.org/">Shine Lab</a>.
<ul class="wp-block-list">
<li><a href="https://x.com/jmacshine">@jmacshine</a>.</li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.ngds-ku.org/Papers/J61/Breakspear.pdf">Dynamic models of large-scale brain activity</a></li>



<li><a href="https://www.nature.com/articles/s41467-019-08999-0">Metastable brain waves</a></li>



<li><a href="https://elifesciences.org/articles/31130">The modulation of neural gain facilitates a transition between functional segregation and integration in the brain</a></li>



<li><a href="https://shine-lab.org/wp-content/uploads/2024/10/2024_cell.pdf">Multiscale Organization of Neuronal Activity Unifies Scale-Dependent Theories of Brain Function</a>.</li>



<li><a href="https://shine-lab.org/wp-content/uploads/2025/03/2025_curropin.pdf">The brain that controls itself</a>.</li>



<li><a href="https://ccs.fau.edu/hbblab/pdfs/2024_Hancock_Kelso_NRN.pdf">Metastability demystified — the foundational past, the pragmatic present and the promising future</a>.</li>



<li><a href="https://direct.mit.edu/imag/article/doi/10.1162/IMAG.a.71/131445">Generation of surrogate brain maps preserving spatial autocorrelation through random rotation of geometric eigenmodes</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/212/">BI 212 John Beggs: Why Brains Seek the Edge of Chaos</a></li>



<li><a href="https://braininspired.co/podcast/216/">BI 216 Woodrow Shew and Keith Hengen: The Nature of Brain Criticality</a></li>



<li><a href="https://braininspired.co/podcast/121/">BI 121 Mac Shine: Systems Neurobiology</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2025/09/BI-220-transcript-breakspear-shine-1.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
4:28 - Neuroscience vs neurobiology
8:01 - Systems approach
26:52 - Physics for neuroscience
33:15 - Gain and bifurcation: earliest collaboration
55:32 - Multiscale organization
1:17:54 - Roadblocks</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2137222/c1e-n4ggidoxonb2r9r2-z3kg243gcxk2-jzurhk.mp3" length="82402417"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership: https://www.thetransmitter.org/partners/



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: https://www.thetransmitter.org/newsletters/



To explore more neuroscience news and perspectives, visit thetransmitter.org.



What changes and what stays the same as you scale from single neurons up to local populations of neurons up to whole brains? How tuning parameters like the gain in some neural populations affects the dynamical and computational properties of the rest of the system.



Those are the main questions my guests today discuss. Michael Breakspear is a professor of Systems Neuroscience and runs the Systems Neuroscience Group at the University of Newcastle in Australia. Mac Shine is back, he was here a few years ago. Mac runs the Shine Lab at the University of Sydney in Australia.



Michael and Mac have been collaborating on the questions I mentioned above, using a systems approach to studying brains and cognition. The short summary of what they discovered in their first collaboration is that turning up or down the gain across broad networks of neurons in the brain affects integration - working together - and segregation - working apart. They map this gain modulation on to the ascending arousal pathway, in which the locus coeruleus projects widely throughout the brain distributing noradrenaline. At a certain sweet spot of gain, integration and segregation are balanced near a bifurcation point, near criticality, which maximizes properties that are good for cognition.



In their recent collaboration, they used a coarse graining procedure inspired by physics to study the collective dynamics of various sizes of neural populations, going from single neurons to large populations of neurons. Here they found that despite different coding properties at different scales, there are also scale-free properties that suggest neural populations of all sizes, from single neurons to brains, can do cognitive stuff useful for the organism. And they found this is a conserved property across many different species, suggesting it's a universal principle of brain dynamics in general.



So we discuss all that, but to get there we talk about what a systems approach to neuroscience is, how systems neuroscience has changed over the years, and how it has inspired the questions Michael and Mac ask.




Breakspear: Systems Neuroscience Group.

@DrBreaky.





Shine: Shine Lab.

@jmacshine.





Related papers

Dynamic models of large-scale brain activity



Metastable brain waves



...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:25:05</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 219 Xaq Pitkow: Principles and Constraints of Cognition]]>
                </title>
                <pubDate>Wed, 27 Aug 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2124635</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-219-xaq-pitkow-principles-and-constraints-of-cognition</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Xaq Pitkow runs the <a href="https://xaqlab.com/">Lab for the Algorithmic Brain</a> at Carnegie Mellon University. The main theme of our discussion is how Xaq approaches his research into cognition by way of principles, from which his questions and models and methods spring forth. We discuss those principles, and in that light, we discuss some of his specific lines of work and ideas on the theoretical side of trying to understand and explain a slew of cognitive processes. A few of the specifics we discuss are:</p>



<ul class="wp-block-list">
<li>How when we present tasks for organisms to solve, they use strategies that are suboptimal relative to the task, but nearly optimal relative to their beliefs about what they need to do - something Xaq calls inverse rational control.</li>



<li>Probabilistic graph networks.</li>



<li>How brains use probabilities to compute.</li>



<li>A new ecological neuroscience project Xaq has started with multiple collaborators.</li>
</ul>



<ul class="wp-block-list">
<li><a href="https://xaqlab.com/">LAB: Lab for the Algorithmic Brain</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2409.02709">How does the brain compute with probabilities?</a></li>



<li><a href="https://www.pnas.org/doi/pdf/10.1073/pnas.1912336117">Rational thoughts in neural codes.</a></li>



<li><a href="https://pmc.ncbi.nlm.nih.gov/articles/PMC11581108/">Control when confidence is costly</a></li>



<li><a href="https://link.springer.com/article/10.1007/s41468-023-00147-4">Generalization of graph network inferences in higher-order graphical models</a>.</li>



<li><a href="https://arxiv.org/pdf/2501.07440">Attention when you need</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/08/BI-219-transcript-pitkow.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:57 - Xaq's approach
8:28 - Inverse rational control
19:19 - Space of input-output functions
24:48 - Cognition for cognition
27:35 - Theory vs. experiment
40:32 - How does the brain compute with probabilities?
1:03:57 - Normative vs kludge
1:07:44 - Ecological neuroscience
1:20:47 - Representations
1:29:34 - Current projects
1:36:04 - Need a synaptome
1:42:20 - Across scales</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Xaq Pitkow runs the Lab for the Algorithmic Brain at Carnegie Mellon University. The main theme of our discussion is how Xaq approaches his research into cognition by way of principles, from which his questions and models and methods spring forth. We discuss those principles, and in that light, we discuss some of his specific lines of work and ideas on the theoretical side of trying to understand and explain a slew of cognitive processes. A few of the specifics we discuss are:




How when we present tasks for organisms to solve, they use strategies that are suboptimal relative to the task, but nearly optimal relative to their beliefs about what they need to do - something Xaq calls inverse rational control.



Probabilistic graph networks.



How brains use probabilities to compute.



A new ecological neuroscience project Xaq has started with multiple collaborators.





LAB: Lab for the Algorithmic Brain.



Related papers

How does the brain compute with probabilities?



Rational thoughts in neural codes.



Control when confidence is costly



Generalization of graph network inferences in higher-order graphical models.



Attention when you need.






Read the transcript.



0:00 - Intro
3:57 - Xaq's approach
8:28 - Inverse rational control
19:19 - Space of input-output functions
24:48 - Cognition for cognition
27:35 - Theory vs. experiment
40:32 - How does the brain compute with probabilities?
1:03:57 - Normative vs kludge
1:07:44 - Ecological neuroscience
1:20:47 - Representations
1:29:34 - Current projects
1:36:04 - Need a synaptome
1:42:20 - Across scales]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 219 Xaq Pitkow: Principles and Constraints of Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Xaq Pitkow runs the <a href="https://xaqlab.com/">Lab for the Algorithmic Brain</a> at Carnegie Mellon University. The main theme of our discussion is how Xaq approaches his research into cognition by way of principles, from which his questions and models and methods spring forth. We discuss those principles, and in that light, we discuss some of his specific lines of work and ideas on the theoretical side of trying to understand and explain a slew of cognitive processes. A few of the specifics we discuss are:</p>



<ul class="wp-block-list">
<li>How when we present tasks for organisms to solve, they use strategies that are suboptimal relative to the task, but nearly optimal relative to their beliefs about what they need to do - something Xaq calls inverse rational control.</li>



<li>Probabilistic graph networks.</li>



<li>How brains use probabilities to compute.</li>



<li>A new ecological neuroscience project Xaq has started with multiple collaborators.</li>
</ul>



<ul class="wp-block-list">
<li><a href="https://xaqlab.com/">LAB: Lab for the Algorithmic Brain</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2409.02709">How does the brain compute with probabilities?</a></li>



<li><a href="https://www.pnas.org/doi/pdf/10.1073/pnas.1912336117">Rational thoughts in neural codes.</a></li>



<li><a href="https://pmc.ncbi.nlm.nih.gov/articles/PMC11581108/">Control when confidence is costly</a></li>



<li><a href="https://link.springer.com/article/10.1007/s41468-023-00147-4">Generalization of graph network inferences in higher-order graphical models</a>.</li>



<li><a href="https://arxiv.org/pdf/2501.07440">Attention when you need</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/08/BI-219-transcript-pitkow.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:57 - Xaq's approach
8:28 - Inverse rational control
19:19 - Space of input-output functions
24:48 - Cognition for cognition
27:35 - Theory vs. experiment
40:32 - How does the brain compute with probabilities?
1:03:57 - Normative vs kludge
1:07:44 - Ecological neuroscience
1:20:47 - Representations
1:29:34 - Current projects
1:36:04 - Need a synaptome
1:42:20 - Across scales</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2124635/c1e-o311c2x4opbvv59d-z3kmnjgragwj-wmqwga.mp3" length="104065344"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Xaq Pitkow runs the Lab for the Algorithmic Brain at Carnegie Mellon University. The main theme of our discussion is how Xaq approaches his research into cognition by way of principles, from which his questions and models and methods spring forth. We discuss those principles, and in that light, we discuss some of his specific lines of work and ideas on the theoretical side of trying to understand and explain a slew of cognitive processes. A few of the specifics we discuss are:




How when we present tasks for organisms to solve, they use strategies that are suboptimal relative to the task, but nearly optimal relative to their beliefs about what they need to do - something Xaq calls inverse rational control.



Probabilistic graph networks.



How brains use probabilities to compute.



A new ecological neuroscience project Xaq has started with multiple collaborators.





LAB: Lab for the Algorithmic Brain.



Related papers

How does the brain compute with probabilities?



Rational thoughts in neural codes.



Control when confidence is costly



Generalization of graph network inferences in higher-order graphical models.



Attention when you need.






Read the transcript.



0:00 - Intro
3:57 - Xaq's approach
8:28 - Inverse rational control
19:19 - Space of input-output functions
24:48 - Cognition for cognition
27:35 - Theory vs. experiment
40:32 - How does the brain compute with probabilities?
1:03:57 - Normative vs kludge
1:07:44 - Ecological neuroscience
1:20:47 - Representations
1:29:34 - Current projects
1:36:04 - Need a synaptome
1:42:20 - Across scales]]>
                </itunes:summary>
                                                                            <itunes:duration>01:47:11</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 218 Chris Rozell: Brain Stimulation and AI for Mental Disorders]]>
                </title>
                <pubDate>Wed, 13 Aug 2025 10:44:13 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2111377</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-218-chris-rozell-brain-stimulation-and-ai-for-mental-disorders</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>We are in an exciting time in the cross-fertilization of the neurotech industry and the cognitive sciences. My guest today is Chris Rozell, who sits in that space that connects neurotech and brain research. Chris runs the <a href="https://siplab.gatech.edu/index.html">Structured Information for Precision Neuroengineering Lab</a> at Georgia Tech University, and he was just named the inaugural director of Georgia Tech’s <a href="https://neuro.gatech.edu/">Institute for Neuroscience, Neurotechnology, and Society</a>. I think this is the first time on brain inspired we've discussed stimulating brains to treat mental disorders. I think. Today we talk about Chris's work establishing a biomarker from brain recordings of patients with treatment resistant depression, a specific form of depression. These are patients who have deep brain stimulation electrodes implanted in an effort to treat their depression. Chris and his team used that stimulation in conjunction with brain recordings and machine learning tools to predict how effective the treatment will be under what circumstances, and so on, to help psychiatrists better treat their patients. We'll get into the details and surrounding issues. Toward the end we also talk about Chris's unique background and path and approach, and why he thinks interdisciplinary research is so important. He's one of the most genuinely well intentioned people I've met, and I hope you're inspired by his research and his story.</p>



<ul class="wp-block-list">
<li><a href="https://siplab.gatech.edu/index.html">Structured Information for Precision Neuroengineering Lab</a>.</li>



<li>Twitter: <a href="https://x.com/crozSciTech">@crozSciTech</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://go.nature.com/48lmlzC">Cingulate dynamics track depression recovery with deep brain stimulation</a>.</li>
</ul>
</li>



<li><a href="https://www.storycollider.org/stories/2025/7/11/wired-lives-stories-about-brain-computer-interfaces">Story Collider: Wired Lives</a></li>
</ul>



<p>0:00 - Intro
3:20 - Overview of the study
17:11 - Closed and open loop stimulation
19:34 - Predicting recovery
28:45 - Control knob for treatment
39:04 - Historical and modern brain stimulation
49:07 - Treatment resistant depression
53:44 - Control nodes complex systems
1:01:06 - Explainable generative AI for a biomarker
1:16:40 - Where are we and what are the obstacles?
1:21:32 - Interface Neuro
1:24:55 - Why Chris cares</p>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/08/BI-218-transcript-production.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









We are in an exciting time in the cross-fertilization of the neurotech industry and the cognitive sciences. My guest today is Chris Rozell, who sits in that space that connects neurotech and brain research. Chris runs the Structured Information for Precision Neuroengineering Lab at Georgia Tech University, and he was just named the inaugural director of Georgia Tech’s Institute for Neuroscience, Neurotechnology, and Society. I think this is the first time on brain inspired we've discussed stimulating brains to treat mental disorders. I think. Today we talk about Chris's work establishing a biomarker from brain recordings of patients with treatment resistant depression, a specific form of depression. These are patients who have deep brain stimulation electrodes implanted in an effort to treat their depression. Chris and his team used that stimulation in conjunction with brain recordings and machine learning tools to predict how effective the treatment will be under what circumstances, and so on, to help psychiatrists better treat their patients. We'll get into the details and surrounding issues. Toward the end we also talk about Chris's unique background and path and approach, and why he thinks interdisciplinary research is so important. He's one of the most genuinely well intentioned people I've met, and I hope you're inspired by his research and his story.




Structured Information for Precision Neuroengineering Lab.



Twitter: @crozSciTech.



Related papers

Cingulate dynamics track depression recovery with deep brain stimulation.





Story Collider: Wired Lives




0:00 - Intro
3:20 - Overview of the study
17:11 - Closed and open loop stimulation
19:34 - Predicting recovery
28:45 - Control knob for treatment
39:04 - Historical and modern brain stimulation
49:07 - Treatment resistant depression
53:44 - Control nodes complex systems
1:01:06 - Explainable generative AI for a biomarker
1:16:40 - Where are we and what are the obstacles?
1:21:32 - Interface Neuro
1:24:55 - Why Chris cares



Read the transcript.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 218 Chris Rozell: Brain Stimulation and AI for Mental Disorders]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>We are in an exciting time in the cross-fertilization of the neurotech industry and the cognitive sciences. My guest today is Chris Rozell, who sits in that space that connects neurotech and brain research. Chris runs the <a href="https://siplab.gatech.edu/index.html">Structured Information for Precision Neuroengineering Lab</a> at Georgia Tech University, and he was just named the inaugural director of Georgia Tech’s <a href="https://neuro.gatech.edu/">Institute for Neuroscience, Neurotechnology, and Society</a>. I think this is the first time on brain inspired we've discussed stimulating brains to treat mental disorders. I think. Today we talk about Chris's work establishing a biomarker from brain recordings of patients with treatment resistant depression, a specific form of depression. These are patients who have deep brain stimulation electrodes implanted in an effort to treat their depression. Chris and his team used that stimulation in conjunction with brain recordings and machine learning tools to predict how effective the treatment will be under what circumstances, and so on, to help psychiatrists better treat their patients. We'll get into the details and surrounding issues. Toward the end we also talk about Chris's unique background and path and approach, and why he thinks interdisciplinary research is so important. He's one of the most genuinely well intentioned people I've met, and I hope you're inspired by his research and his story.</p>



<ul class="wp-block-list">
<li><a href="https://siplab.gatech.edu/index.html">Structured Information for Precision Neuroengineering Lab</a>.</li>



<li>Twitter: <a href="https://x.com/crozSciTech">@crozSciTech</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://go.nature.com/48lmlzC">Cingulate dynamics track depression recovery with deep brain stimulation</a>.</li>
</ul>
</li>



<li><a href="https://www.storycollider.org/stories/2025/7/11/wired-lives-stories-about-brain-computer-interfaces">Story Collider: Wired Lives</a></li>
</ul>



<p>0:00 - Intro
3:20 - Overview of the study
17:11 - Closed and open loop stimulation
19:34 - Predicting recovery
28:45 - Control knob for treatment
39:04 - Historical and modern brain stimulation
49:07 - Treatment resistant depression
53:44 - Control nodes complex systems
1:01:06 - Explainable generative AI for a biomarker
1:16:40 - Where are we and what are the obstacles?
1:21:32 - Interface Neuro
1:24:55 - Why Chris cares</p>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/08/BI-218-transcript-production.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2111377/c1e-rdvvfwd0rxagzmo7-7z9zq456sq8o-uhpi9i.mp3" length="103553466"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









We are in an exciting time in the cross-fertilization of the neurotech industry and the cognitive sciences. My guest today is Chris Rozell, who sits in that space that connects neurotech and brain research. Chris runs the Structured Information for Precision Neuroengineering Lab at Georgia Tech University, and he was just named the inaugural director of Georgia Tech’s Institute for Neuroscience, Neurotechnology, and Society. I think this is the first time on brain inspired we've discussed stimulating brains to treat mental disorders. I think. Today we talk about Chris's work establishing a biomarker from brain recordings of patients with treatment resistant depression, a specific form of depression. These are patients who have deep brain stimulation electrodes implanted in an effort to treat their depression. Chris and his team used that stimulation in conjunction with brain recordings and machine learning tools to predict how effective the treatment will be under what circumstances, and so on, to help psychiatrists better treat their patients. We'll get into the details and surrounding issues. Toward the end we also talk about Chris's unique background and path and approach, and why he thinks interdisciplinary research is so important. He's one of the most genuinely well intentioned people I've met, and I hope you're inspired by his research and his story.




Structured Information for Precision Neuroengineering Lab.



Twitter: @crozSciTech.



Related papers

Cingulate dynamics track depression recovery with deep brain stimulation.





Story Collider: Wired Lives




0:00 - Intro
3:20 - Overview of the study
17:11 - Closed and open loop stimulation
19:34 - Predicting recovery
28:45 - Control knob for treatment
39:04 - Historical and modern brain stimulation
49:07 - Treatment resistant depression
53:44 - Control nodes complex systems
1:01:06 - Explainable generative AI for a biomarker
1:16:40 - Where are we and what are the obstacles?
1:21:32 - Interface Neuro
1:24:55 - Why Chris cares



Read the transcript.]]>
                </itunes:summary>
                                                                            <itunes:duration>01:46:39</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 217 Jennifer Prendki: Consciousness, Life, AI, and Quantum Physics]]>
                </title>
                <pubDate>Wed, 30 Jul 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2099885</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-217-jennifer-prendki-consciousness-life-ai-anvuw</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Do AI engineers need to emulate some processes and features found only in living organisms at the moment, like how brains are inextricably integrated with bodies? Is consciousness necessary for AI entities if we want them to play nice with us? Is quantum physics part of that story, or a key part, or <em>the</em> key part? Jennifer Prendki believes if we continue to scale AI, it will get us more of the same of what we have today, and that we should look to biology, life, and possibly consciousness to enhance AI. Jennifer is a former particle physicist turned entrepreneur and AI expert, focusing on curating the right kinds and forms of data to train AI, and in that vein she led those efforts at DeepMind on the foundation models ubiquitous in our lives now.</p>



<p>I was curious why someone with that background would come to the conclusion that AI needs inspiration from life, biology, and consciousness to move forward gracefully, and that it would be useful to better understand those processes in ourselves before trying to build what some people call AGI, whatever that is. Her perspective is a rarity among her cohorts, which we also discuss. And get this: she's interested in these topics because she cares about what happens to the planet and to us as a species. Perhaps also a rarity among those charging ahead to dominate profits and win the race.</p>



<ul class="wp-block-list">
<li>Jennifer's website: <a href="https://www.quantumofdata.com/">Quantum of Data</a>.</li>



<li>The blog posts we discuss:
<ul class="wp-block-list">
<li><a href="https://www.quantumofdata.com/blog-posts/the-myth-of-emergence">The Myth of Emergence</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/embodiment-sentience-why-the-body-still-matters">Embodiment &amp; Sentience: Why the Body still Matters</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/the-architecture-of-synthetic-consciousness">The Architecture of Synthetic Consciousness</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/on-time-and-consciousness">On Time and Consciousness</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/superalignment-and-the-question-of-ai-personhood">Superalignment and the Question of AI Personhood</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/07/FINAL_BI-217-transcript-prendki.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:25 - Jennifer's background
13:10 - Consciousness
16:38 - Life and consciousness
23:16 - Superalignment
40:11 - Quantum
1:04:45 - Wetware and biological mimicry
1:15:03 - Neural interfaces
1:16:48 - AI ethics
1:2:35 - AI models are not models
1:27:13 - What scaling will get us
1:39:53 - Current roadblocks
1:43:19 - Philosophy</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Do AI engineers need to emulate some processes and features found only in living organisms at the moment, like how brains are inextricably integrated with bodies? Is consciousness necessary for AI entities if we want them to play nice with us? Is quantum physics part of that story, or a key part, or the key part? Jennifer Prendki believes if we continue to scale AI, it will get us more of the same of what we have today, and that we should look to biology, life, and possibly consciousness to enhance AI. Jennifer is a former particle physicist turned entrepreneur and AI expert, focusing on curating the right kinds and forms of data to train AI, and in that vein she led those efforts at DeepMind on the foundation models ubiquitous in our lives now.



I was curious why someone with that background would come to the conclusion that AI needs inspiration from life, biology, and consciousness to move forward gracefully, and that it would be useful to better understand those processes in ourselves before trying to build what some people call AGI, whatever that is. Her perspective is a rarity among her cohorts, which we also discuss. And get this: she's interested in these topics because she cares about what happens to the planet and to us as a species. Perhaps also a rarity among those charging ahead to dominate profits and win the race.




Jennifer's website: Quantum of Data.



The blog posts we discuss:

The Myth of Emergence



Embodiment & Sentience: Why the Body still Matters



The Architecture of Synthetic Consciousness



On Time and Consciousness



Superalignment and the Question of AI Personhood.






Read the transcript.



0:00 - Intro
3:25 - Jennifer's background
13:10 - Consciousness
16:38 - Life and consciousness
23:16 - Superalignment
40:11 - Quantum
1:04:45 - Wetware and biological mimicry
1:15:03 - Neural interfaces
1:16:48 - AI ethics
1:2:35 - AI models are not models
1:27:13 - What scaling will get us
1:39:53 - Current roadblocks
1:43:19 - Philosophy]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 217 Jennifer Prendki: Consciousness, Life, AI, and Quantum Physics]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Do AI engineers need to emulate some processes and features found only in living organisms at the moment, like how brains are inextricably integrated with bodies? Is consciousness necessary for AI entities if we want them to play nice with us? Is quantum physics part of that story, or a key part, or <em>the</em> key part? Jennifer Prendki believes if we continue to scale AI, it will get us more of the same of what we have today, and that we should look to biology, life, and possibly consciousness to enhance AI. Jennifer is a former particle physicist turned entrepreneur and AI expert, focusing on curating the right kinds and forms of data to train AI, and in that vein she led those efforts at DeepMind on the foundation models ubiquitous in our lives now.</p>



<p>I was curious why someone with that background would come to the conclusion that AI needs inspiration from life, biology, and consciousness to move forward gracefully, and that it would be useful to better understand those processes in ourselves before trying to build what some people call AGI, whatever that is. Her perspective is a rarity among her cohorts, which we also discuss. And get this: she's interested in these topics because she cares about what happens to the planet and to us as a species. Perhaps also a rarity among those charging ahead to dominate profits and win the race.</p>



<ul class="wp-block-list">
<li>Jennifer's website: <a href="https://www.quantumofdata.com/">Quantum of Data</a>.</li>



<li>The blog posts we discuss:
<ul class="wp-block-list">
<li><a href="https://www.quantumofdata.com/blog-posts/the-myth-of-emergence">The Myth of Emergence</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/embodiment-sentience-why-the-body-still-matters">Embodiment &amp; Sentience: Why the Body still Matters</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/the-architecture-of-synthetic-consciousness">The Architecture of Synthetic Consciousness</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/on-time-and-consciousness">On Time and Consciousness</a></li>



<li><a href="https://www.quantumofdata.com/blog-posts/superalignment-and-the-question-of-ai-personhood">Superalignment and the Question of AI Personhood</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/07/FINAL_BI-217-transcript-prendki.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:25 - Jennifer's background
13:10 - Consciousness
16:38 - Life and consciousness
23:16 - Superalignment
40:11 - Quantum
1:04:45 - Wetware and biological mimicry
1:15:03 - Neural interfaces
1:16:48 - AI ethics
1:2:35 - AI models are not models
1:27:13 - What scaling will get us
1:39:53 - Current roadblocks
1:43:19 - Philosophy</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2099885/c1e-m1mmsq4gmnc3p1jk-v64wdx9zaj1-4hdl9z.mp3" length="105719261"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Do AI engineers need to emulate some processes and features found only in living organisms at the moment, like how brains are inextricably integrated with bodies? Is consciousness necessary for AI entities if we want them to play nice with us? Is quantum physics part of that story, or a key part, or the key part? Jennifer Prendki believes if we continue to scale AI, it will get us more of the same of what we have today, and that we should look to biology, life, and possibly consciousness to enhance AI. Jennifer is a former particle physicist turned entrepreneur and AI expert, focusing on curating the right kinds and forms of data to train AI, and in that vein she led those efforts at DeepMind on the foundation models ubiquitous in our lives now.



I was curious why someone with that background would come to the conclusion that AI needs inspiration from life, biology, and consciousness to move forward gracefully, and that it would be useful to better understand those processes in ourselves before trying to build what some people call AGI, whatever that is. Her perspective is a rarity among her cohorts, which we also discuss. And get this: she's interested in these topics because she cares about what happens to the planet and to us as a species. Perhaps also a rarity among those charging ahead to dominate profits and win the race.




Jennifer's website: Quantum of Data.



The blog posts we discuss:

The Myth of Emergence



Embodiment & Sentience: Why the Body still Matters



The Architecture of Synthetic Consciousness



On Time and Consciousness



Superalignment and the Question of AI Personhood.






Read the transcript.



0:00 - Intro
3:25 - Jennifer's background
13:10 - Consciousness
16:38 - Life and consciousness
23:16 - Superalignment
40:11 - Quantum
1:04:45 - Wetware and biological mimicry
1:15:03 - Neural interfaces
1:16:48 - AI ethics
1:2:35 - AI models are not models
1:27:13 - What scaling will get us
1:39:53 - Current roadblocks
1:43:19 - Philosophy]]>
                </itunes:summary>
                                                                            <itunes:duration>01:48:53</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 216 Woodrow Shew and Keith Hengen: The Nature of Brain Criticality]]>
                </title>
                <pubDate>Wed, 16 Jul 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2089187</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-216-woodrow-shew-and-keith-hengen-the-nature-ofvf8</link>
                                <description>
                                            <![CDATA[<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>







<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>A few episodes ago, episode 212, I <a href="https://braininspired.co/podcast/212/">conversed with John Beggs</a> about how criticality might be an important dynamic regime of brain function to optimize our cognition and behavior. Today we continue and extend that exploration with a few other folks in the criticality world.</p>



<p>Woodrow Shew is a professor and runs the <a href="https://www.woodrowshew.com/home">Shew Lab</a> at the University of Arkansas. Keith Hengen is an associate professor and runs the <a href="https://hengenlab.org/">Hengen Lab</a> at Washington University in St. Louis, Missouri. Together, they are Hengen and Shew on a recent review paper in Neuron, titled <a href="https://www.cell.com/neuron/fulltext/S0896-6273(25)00391-5">Is criticality a unified setpoint of brain function?</a> In the review they argue that criticality is a kind of homeostatic goal of neural activity, describing multiple properties and signatures of criticality, they discuss multiple testable predictions of their thesis, and they address the historical and current controversies surrounding criticality in the brain, surveying what Woody thinks is all the past studies on criticality, which is over 300. And they offer an account of why many of these past studies did not find criticality, but looking through a modern lens they most likely would. We discuss some of the topics in their paper, but we also dance around their current thoughts about things like the nature and implications of being nearer and farther from critical dynamics, the relation between criticality and neural manifolds, and a lot more. You get to experience Woody and Keith thinking in real time about these things, which I hope you appreciate.</p>



<ul class="wp-block-list">
<li><a href="https://www.woodrowshew.com/home">Shew Lab</a>.  <a href="https://x.com/ShewLab">@ShewLab</a></li>



<li><a href="https://hengenlab.org/">Hengen Lab</a>.</li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(25)00391-5">Is criticality a unified setpoint of brain function?</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/07/BI-216-transcript-hengen-shew-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:41 - Collaborating
6:22 - Criticality community
14:47 - Tasks vs. Naturalistic
20:50 - Nature of criticality
25:47 - Deviating from criticality
33:45 - Sleep for criticality
38:41 - Neuromodulation for criticality
40:45 - Criticality Definition part 1: scale invariance
43:14 - Criticality Definition part 2: At a boundary
51:56 - New method to assess criticality
56:12 - Types of criticality
1:02:23 - Value of criticality versus other metrics
1:15:21 - Manifolds and criticality
1:26:06 - Current challenges</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[



Support the show to get full episodes, full archive, and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



A few episodes ago, episode 212, I conversed with John Beggs about how criticality might be an important dynamic regime of brain function to optimize our cognition and behavior. Today we continue and extend that exploration with a few other folks in the criticality world.



Woodrow Shew is a professor and runs the Shew Lab at the University of Arkansas. Keith Hengen is an associate professor and runs the Hengen Lab at Washington University in St. Louis, Missouri. Together, they are Hengen and Shew on a recent review paper in Neuron, titled Is criticality a unified setpoint of brain function? In the review they argue that criticality is a kind of homeostatic goal of neural activity, describing multiple properties and signatures of criticality, they discuss multiple testable predictions of their thesis, and they address the historical and current controversies surrounding criticality in the brain, surveying what Woody thinks is all the past studies on criticality, which is over 300. And they offer an account of why many of these past studies did not find criticality, but looking through a modern lens they most likely would. We discuss some of the topics in their paper, but we also dance around their current thoughts about things like the nature and implications of being nearer and farther from critical dynamics, the relation between criticality and neural manifolds, and a lot more. You get to experience Woody and Keith thinking in real time about these things, which I hope you appreciate.




Shew Lab.  @ShewLab



Hengen Lab.



Is criticality a unified setpoint of brain function?




Read the transcript.



0:00 - Intro
3:41 - Collaborating
6:22 - Criticality community
14:47 - Tasks vs. Naturalistic
20:50 - Nature of criticality
25:47 - Deviating from criticality
33:45 - Sleep for criticality
38:41 - Neuromodulation for criticality
40:45 - Criticality Definition part 1: scale invariance
43:14 - Criticality Definition part 2: At a boundary
51:56 - New method to assess criticality
56:12 - Types of criticality
1:02:23 - Value of criticality versus other metrics
1:15:21 - Manifolds and criticality
1:26:06 - Current challenges]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 216 Woodrow Shew and Keith Hengen: The Nature of Brain Criticality]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>







<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>A few episodes ago, episode 212, I <a href="https://braininspired.co/podcast/212/">conversed with John Beggs</a> about how criticality might be an important dynamic regime of brain function to optimize our cognition and behavior. Today we continue and extend that exploration with a few other folks in the criticality world.</p>



<p>Woodrow Shew is a professor and runs the <a href="https://www.woodrowshew.com/home">Shew Lab</a> at the University of Arkansas. Keith Hengen is an associate professor and runs the <a href="https://hengenlab.org/">Hengen Lab</a> at Washington University in St. Louis, Missouri. Together, they are Hengen and Shew on a recent review paper in Neuron, titled <a href="https://www.cell.com/neuron/fulltext/S0896-6273(25)00391-5">Is criticality a unified setpoint of brain function?</a> In the review they argue that criticality is a kind of homeostatic goal of neural activity, describing multiple properties and signatures of criticality, they discuss multiple testable predictions of their thesis, and they address the historical and current controversies surrounding criticality in the brain, surveying what Woody thinks is all the past studies on criticality, which is over 300. And they offer an account of why many of these past studies did not find criticality, but looking through a modern lens they most likely would. We discuss some of the topics in their paper, but we also dance around their current thoughts about things like the nature and implications of being nearer and farther from critical dynamics, the relation between criticality and neural manifolds, and a lot more. You get to experience Woody and Keith thinking in real time about these things, which I hope you appreciate.</p>



<ul class="wp-block-list">
<li><a href="https://www.woodrowshew.com/home">Shew Lab</a>.  <a href="https://x.com/ShewLab">@ShewLab</a></li>



<li><a href="https://hengenlab.org/">Hengen Lab</a>.</li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(25)00391-5">Is criticality a unified setpoint of brain function?</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/07/BI-216-transcript-hengen-shew-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:41 - Collaborating
6:22 - Criticality community
14:47 - Tasks vs. Naturalistic
20:50 - Nature of criticality
25:47 - Deviating from criticality
33:45 - Sleep for criticality
38:41 - Neuromodulation for criticality
40:45 - Criticality Definition part 1: scale invariance
43:14 - Criticality Definition part 2: At a boundary
51:56 - New method to assess criticality
56:12 - Types of criticality
1:02:23 - Value of criticality versus other metrics
1:15:21 - Manifolds and criticality
1:26:06 - Current challenges</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2089187/c1e-5k88h17gnvtjx3m0-dm24nqo0c112-liltyl.mp3" length="91920669"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[



Support the show to get full episodes, full archive, and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



A few episodes ago, episode 212, I conversed with John Beggs about how criticality might be an important dynamic regime of brain function to optimize our cognition and behavior. Today we continue and extend that exploration with a few other folks in the criticality world.



Woodrow Shew is a professor and runs the Shew Lab at the University of Arkansas. Keith Hengen is an associate professor and runs the Hengen Lab at Washington University in St. Louis, Missouri. Together, they are Hengen and Shew on a recent review paper in Neuron, titled Is criticality a unified setpoint of brain function? In the review they argue that criticality is a kind of homeostatic goal of neural activity, describing multiple properties and signatures of criticality, they discuss multiple testable predictions of their thesis, and they address the historical and current controversies surrounding criticality in the brain, surveying what Woody thinks is all the past studies on criticality, which is over 300. And they offer an account of why many of these past studies did not find criticality, but looking through a modern lens they most likely would. We discuss some of the topics in their paper, but we also dance around their current thoughts about things like the nature and implications of being nearer and farther from critical dynamics, the relation between criticality and neural manifolds, and a lot more. You get to experience Woody and Keith thinking in real time about these things, which I hope you appreciate.




Shew Lab.  @ShewLab



Hengen Lab.



Is criticality a unified setpoint of brain function?




Read the transcript.



0:00 - Intro
3:41 - Collaborating
6:22 - Criticality community
14:47 - Tasks vs. Naturalistic
20:50 - Nature of criticality
25:47 - Deviating from criticality
33:45 - Sleep for criticality
38:41 - Neuromodulation for criticality
40:45 - Criticality Definition part 1: scale invariance
43:14 - Criticality Definition part 2: At a boundary
51:56 - New method to assess criticality
56:12 - Types of criticality
1:02:23 - Value of criticality versus other metrics
1:15:21 - Manifolds and criticality
1:26:06 - Current challenges]]>
                </itunes:summary>
                                                                            <itunes:duration>01:34:21</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 215 Xiao-Jing Wang: Theoretical Neuroscience Comes of Age]]>
                </title>
                <pubDate>Wed, 02 Jul 2025 14:24:46 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2079670</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-215-xiao-jing-wang-theoretical-neuroscience-comelmu</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Xiao-Jing Wang is a Distinguished Global Professor of Neuroscience at NYU</p>



<p>Xiao-Jing was born and grew up in China, spent 8 years in Belgium studying theoretical physics like nonlinear dynamical systems and deterministic chaos. And as he says it, he arrived from Brussels to California as a postdoc, and in one day switched from French to English, from European to American culture, and physics to neuroscience. I know Xiao-Jing as a legend in non-human primate neurophysiology and modeling, paving the way for the rest of us to study brain activity related cognitive functions like working memory and decision-making.</p>



<p>He has just released his new textbook, <a href="https://amzn.to/4emD7lh">Theoretical Neuroscience: Understanding Cognition</a>, which covers the history and current research on modeling cognitive functions from the very simple to the very cognitive. The book is also somewhat philosophical, arguing that we need to update our approach to explaining how brains function, to go beyond Marr's levels and enter a cross-level mechanistic explanatory pursuit, which we discuss. I just learned he even cites my own PhD research, studying metacognition in nonhuman primates - so you know it's a great book. Learn more about Xiao-Jing and the book in the show notes. It was fun having one of my heroes on the podcast, and I hope you enjoy our discussion.</p>



<ul class="wp-block-list">
<li><a href="https://www.cns.nyu.edu/wanglab/">Computational Laboratory of Cortical Dynamics</a></li>



<li>Book: <a href="https://amzn.to/4emD7lh">Theoretical Neuroscience: Understanding Cognition</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.cns.nyu.edu/wanglab/publications/pdf/wang04.pnas.pdf">Division of labor among distinct subtypes of inhibitory neurons in a cortical microcircuit of working memory</a>.</li>



<li><a href="https://www.cns.nyu.edu/wanglab/publications/pdf/wang.nrns2020.pdf">Macroscopic gradients of synaptic excitation and inhibition across the neocortex</a>.</li>



<li><a href="https://www.cns.nyu.edu/wanglab/publications/pdf/wang.arns2022.pdf">Theory of the multiregional neocortex: large-scale neural dynamics and distributed cognition</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:08 - Why the book now?
11:00 - Modularity in neuro vs AI
14:01 - Working memory and modularity
22:37 - Canonical cortical microcircuits
25:53 - Gradient of inhibitory neurons
27:47 - Comp neuro then and now
45:35 - Cross-level mechanistic understanding
1:13:38 - Bifurcation
1:24:51 - Bifurcation and degeneracy
1:34:02 - Control theory
1:35:41 - Psychiatric disorders
1:39:14 - Beyond dynamical systems
1:43:47 - Mouse as a model
1:48:11 - AI needs a PFC</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Xiao-Jing Wang is a Distinguished Global Professor of Neuroscience at NYU



Xiao-Jing was born and grew up in China, spent 8 years in Belgium studying theoretical physics like nonlinear dynamical systems and deterministic chaos. And as he says it, he arrived from Brussels to California as a postdoc, and in one day switched from French to English, from European to American culture, and physics to neuroscience. I know Xiao-Jing as a legend in non-human primate neurophysiology and modeling, paving the way for the rest of us to study brain activity related cognitive functions like working memory and decision-making.



He has just released his new textbook, Theoretical Neuroscience: Understanding Cognition, which covers the history and current research on modeling cognitive functions from the very simple to the very cognitive. The book is also somewhat philosophical, arguing that we need to update our approach to explaining how brains function, to go beyond Marr's levels and enter a cross-level mechanistic explanatory pursuit, which we discuss. I just learned he even cites my own PhD research, studying metacognition in nonhuman primates - so you know it's a great book. Learn more about Xiao-Jing and the book in the show notes. It was fun having one of my heroes on the podcast, and I hope you enjoy our discussion.




Computational Laboratory of Cortical Dynamics



Book: Theoretical Neuroscience: Understanding Cognition.



Related papers

Division of labor among distinct subtypes of inhibitory neurons in a cortical microcircuit of working memory.



Macroscopic gradients of synaptic excitation and inhibition across the neocortex.



Theory of the multiregional neocortex: large-scale neural dynamics and distributed cognition.






0:00 - Intro
3:08 - Why the book now?
11:00 - Modularity in neuro vs AI
14:01 - Working memory and modularity
22:37 - Canonical cortical microcircuits
25:53 - Gradient of inhibitory neurons
27:47 - Comp neuro then and now
45:35 - Cross-level mechanistic understanding
1:13:38 - Bifurcation
1:24:51 - Bifurcation and degeneracy
1:34:02 - Control theory
1:35:41 - Psychiatric disorders
1:39:14 - Beyond dynamical systems
1:43:47 - Mouse as a model
1:48:11 - AI needs a PFC]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 215 Xiao-Jing Wang: Theoretical Neuroscience Comes of Age]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Xiao-Jing Wang is a Distinguished Global Professor of Neuroscience at NYU</p>



<p>Xiao-Jing was born and grew up in China, spent 8 years in Belgium studying theoretical physics like nonlinear dynamical systems and deterministic chaos. And as he says it, he arrived from Brussels to California as a postdoc, and in one day switched from French to English, from European to American culture, and physics to neuroscience. I know Xiao-Jing as a legend in non-human primate neurophysiology and modeling, paving the way for the rest of us to study brain activity related cognitive functions like working memory and decision-making.</p>



<p>He has just released his new textbook, <a href="https://amzn.to/4emD7lh">Theoretical Neuroscience: Understanding Cognition</a>, which covers the history and current research on modeling cognitive functions from the very simple to the very cognitive. The book is also somewhat philosophical, arguing that we need to update our approach to explaining how brains function, to go beyond Marr's levels and enter a cross-level mechanistic explanatory pursuit, which we discuss. I just learned he even cites my own PhD research, studying metacognition in nonhuman primates - so you know it's a great book. Learn more about Xiao-Jing and the book in the show notes. It was fun having one of my heroes on the podcast, and I hope you enjoy our discussion.</p>



<ul class="wp-block-list">
<li><a href="https://www.cns.nyu.edu/wanglab/">Computational Laboratory of Cortical Dynamics</a></li>



<li>Book: <a href="https://amzn.to/4emD7lh">Theoretical Neuroscience: Understanding Cognition</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.cns.nyu.edu/wanglab/publications/pdf/wang04.pnas.pdf">Division of labor among distinct subtypes of inhibitory neurons in a cortical microcircuit of working memory</a>.</li>



<li><a href="https://www.cns.nyu.edu/wanglab/publications/pdf/wang.nrns2020.pdf">Macroscopic gradients of synaptic excitation and inhibition across the neocortex</a>.</li>



<li><a href="https://www.cns.nyu.edu/wanglab/publications/pdf/wang.arns2022.pdf">Theory of the multiregional neocortex: large-scale neural dynamics and distributed cognition</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:08 - Why the book now?
11:00 - Modularity in neuro vs AI
14:01 - Working memory and modularity
22:37 - Canonical cortical microcircuits
25:53 - Gradient of inhibitory neurons
27:47 - Comp neuro then and now
45:35 - Cross-level mechanistic understanding
1:13:38 - Bifurcation
1:24:51 - Bifurcation and degeneracy
1:34:02 - Control theory
1:35:41 - Psychiatric disorders
1:39:14 - Beyond dynamical systems
1:43:47 - Mouse as a model
1:48:11 - AI needs a PFC</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2079670/c1e-jjrra55gk9fwjw2r-7z3qkvqwc8om-4er4ud.mp3" length="108836120"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Xiao-Jing Wang is a Distinguished Global Professor of Neuroscience at NYU



Xiao-Jing was born and grew up in China, spent 8 years in Belgium studying theoretical physics like nonlinear dynamical systems and deterministic chaos. And as he says it, he arrived from Brussels to California as a postdoc, and in one day switched from French to English, from European to American culture, and physics to neuroscience. I know Xiao-Jing as a legend in non-human primate neurophysiology and modeling, paving the way for the rest of us to study brain activity related cognitive functions like working memory and decision-making.



He has just released his new textbook, Theoretical Neuroscience: Understanding Cognition, which covers the history and current research on modeling cognitive functions from the very simple to the very cognitive. The book is also somewhat philosophical, arguing that we need to update our approach to explaining how brains function, to go beyond Marr's levels and enter a cross-level mechanistic explanatory pursuit, which we discuss. I just learned he even cites my own PhD research, studying metacognition in nonhuman primates - so you know it's a great book. Learn more about Xiao-Jing and the book in the show notes. It was fun having one of my heroes on the podcast, and I hope you enjoy our discussion.




Computational Laboratory of Cortical Dynamics



Book: Theoretical Neuroscience: Understanding Cognition.



Related papers

Division of labor among distinct subtypes of inhibitory neurons in a cortical microcircuit of working memory.



Macroscopic gradients of synaptic excitation and inhibition across the neocortex.



Theory of the multiregional neocortex: large-scale neural dynamics and distributed cognition.






0:00 - Intro
3:08 - Why the book now?
11:00 - Modularity in neuro vs AI
14:01 - Working memory and modularity
22:37 - Canonical cortical microcircuits
25:53 - Gradient of inhibitory neurons
27:47 - Comp neuro then and now
45:35 - Cross-level mechanistic understanding
1:13:38 - Bifurcation
1:24:51 - Bifurcation and degeneracy
1:34:02 - Control theory
1:35:41 - Psychiatric disorders
1:39:14 - Beyond dynamical systems
1:43:47 - Mouse as a model
1:48:11 - AI needs a PFC]]>
                </itunes:summary>
                                                                            <itunes:duration>01:52:02</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 214 Nicole Rust: How To Actually Fix Brains and Minds]]>
                </title>
                <pubDate>Wed, 18 Jun 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2068633</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-214-nicole-rust-how-to-actually-fix-brains-and-minds</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story:</p>



<p><a href="https://www.thetransmitter.org/the-big-picture/what-if-anything-makes-mood-fundamentally-different-from-memory/">What, if anything, makes mood fundamentally different from memory?</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p><a href="https://amzn.to/3H0UmvH">Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That</a>. Nicole Rust runs the Visual Memory laboratory at the University of Pennsylvania. Her interests have expanded now to include mood and feelings, as you'll hear. And she wrote this book, which contains a plethora of ideas about how we can pave a way forward in neuroscience to help treat mental and brain disorders. We talk about a small plethora of those ideas from her book, which also contains the story, which you'll partially hear, of her own journey in thinking about these things from working early on in visual neuroscience to where she is now.</p>



<ul class="wp-block-list">
<li><a href="https://www.nicolecrust.com/">Nicole's website</a>.</li>



<li><a href="https://amzn.to/3H0UmvH">Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That</a>.</li>
</ul>



<p>0:00 - Intro
6:12 - Nicole's path
19:25 - The grand plan
25:18 - Robustness and fragility
39:15 - Mood
49:25 - Model everything!
56:26 - Epistemic iteration
1:06:50 - Can we standardize mood?
1:10:36 - Perspective neuroscience
1:20:12 - William Wimsatt
1:25:40 - Consciousness</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this story:



What, if anything, makes mood fundamentally different from memory?



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That. Nicole Rust runs the Visual Memory laboratory at the University of Pennsylvania. Her interests have expanded now to include mood and feelings, as you'll hear. And she wrote this book, which contains a plethora of ideas about how we can pave a way forward in neuroscience to help treat mental and brain disorders. We talk about a small plethora of those ideas from her book, which also contains the story, which you'll partially hear, of her own journey in thinking about these things from working early on in visual neuroscience to where she is now.




Nicole's website.



Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That.




0:00 - Intro
6:12 - Nicole's path
19:25 - The grand plan
25:18 - Robustness and fragility
39:15 - Mood
49:25 - Model everything!
56:26 - Epistemic iteration
1:06:50 - Can we standardize mood?
1:10:36 - Perspective neuroscience
1:20:12 - William Wimsatt
1:25:40 - Consciousness]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 214 Nicole Rust: How To Actually Fix Brains and Minds]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story:</p>



<p><a href="https://www.thetransmitter.org/the-big-picture/what-if-anything-makes-mood-fundamentally-different-from-memory/">What, if anything, makes mood fundamentally different from memory?</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p><a href="https://amzn.to/3H0UmvH">Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That</a>. Nicole Rust runs the Visual Memory laboratory at the University of Pennsylvania. Her interests have expanded now to include mood and feelings, as you'll hear. And she wrote this book, which contains a plethora of ideas about how we can pave a way forward in neuroscience to help treat mental and brain disorders. We talk about a small plethora of those ideas from her book, which also contains the story, which you'll partially hear, of her own journey in thinking about these things from working early on in visual neuroscience to where she is now.</p>



<ul class="wp-block-list">
<li><a href="https://www.nicolecrust.com/">Nicole's website</a>.</li>



<li><a href="https://amzn.to/3H0UmvH">Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That</a>.</li>
</ul>



<p>0:00 - Intro
6:12 - Nicole's path
19:25 - The grand plan
25:18 - Robustness and fragility
39:15 - Mood
49:25 - Model everything!
56:26 - Epistemic iteration
1:06:50 - Can we standardize mood?
1:10:36 - Perspective neuroscience
1:20:12 - William Wimsatt
1:25:40 - Consciousness</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2068633/c1e-m1mmsqnm5kfx270g-mk4z905dsmg3-94wuyi.mp3" length="90784826"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this story:



What, if anything, makes mood fundamentally different from memory?



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That. Nicole Rust runs the Visual Memory laboratory at the University of Pennsylvania. Her interests have expanded now to include mood and feelings, as you'll hear. And she wrote this book, which contains a plethora of ideas about how we can pave a way forward in neuroscience to help treat mental and brain disorders. We talk about a small plethora of those ideas from her book, which also contains the story, which you'll partially hear, of her own journey in thinking about these things from working early on in visual neuroscience to where she is now.




Nicole's website.



Elusive Cures: Why Neuroscience Hasn’t Solved Brain Disorders―and How We Can Change That.




0:00 - Intro
6:12 - Nicole's path
19:25 - The grand plan
25:18 - Robustness and fragility
39:15 - Mood
49:25 - Model everything!
56:26 - Epistemic iteration
1:06:50 - Can we standardize mood?
1:10:36 - Perspective neuroscience
1:20:12 - William Wimsatt
1:25:40 - Consciousness]]>
                </itunes:summary>
                                                                            <itunes:duration>01:33:26</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 213 Representations in Minds and Brains]]>
                </title>
                <pubDate>Wed, 04 Jun 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2057603</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-213-representations-in-minds-and-brains</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this series of essays about representations:</p>



<p><a href="https://www.thetransmitter.org/defining-representations/what-are-we-talking-about-clarifying-the-fuzzy-concept-of-representation-in-neuroscience-and-beyond/">What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>What do neuroscientists mean when they use the term representation? That's part of what <a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a> and <a href="https://www.edouardmachery.com/">Edouard Machery</a> set out to answer a couple years ago by surveying lots of folks in the cognitive sciences, and they concluded that as a field the term is used in a confused and unclear way. Confused and unclear are technical terms here, and Luis and Edouard explain what they mean in the episode. More recently Luis and Edouard wrote a follow-up piece arguing that maybe it's okay for everyone to use the term in slightly different ways, maybe it helps communication across disciplines, perhaps. My three other guests today, <a href="https://frances-egan.org/index.html">Frances Egan</a>, <a href="https://sites.google.com/site/luosha/home/research">Rosa Cao</a>, and <a href="https://www.blam-lab-jhu.org/">John Krakauer</a> wrote responses to that argument, and on today's episode all those folks are here to further discuss that issue and why it matters. Luis is a part philosopher, part cognitive scientist at Indiana University Bloomington, Edouard is a philosopher and Director of the Center for Philosophy of Science at the University of Pittsburgh, Frances is a philosopher from Rutgers University, Rosa is a neuroscientist-turned philosopher at Stanford University, and John is a neuroscientist among other things, and co-runs the Brain, Learning, Animation, and Movement Lab at Johns Hopkins.</p>



<ul class="wp-block-list">
<li><a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a>.
<ul class="wp-block-list">
<li>Favela's book: <a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a></li>
</ul>
</li>



<li><a href="https://www.edouardmachery.com/">Edouard Machery</a>.
<ul class="wp-block-list">
<li>Machery's book: <a href="https://academic.oup.com/book/11923">Doing without Concepts</a></li>
</ul>
</li>



<li><a href="https://frances-egan.org/index.html">Frances Egan</a>.
<ul class="wp-block-list">
<li>Egan's book: <a href="https://amzn.to/4mvsEYs">Deflating Mental Representation</a>.</li>
</ul>
</li>



<li><a href="https://www.blam-lab-jhu.org/">John Krakauer</a>.</li>



<li><a href="https://sites.google.com/site/luosha/home/research">Rosa Cao</a>.
<ul class="wp-block-list">
<li><a href="https://link.springer.com/article/10.1007/s11229-022-03522-3">Paper mentioned: Putting representations to use</a>.</li>
</ul>
</li>



<li>The exchange, in order, discussed on this episode:
<ul class="wp-block-list">
<li><a href="https://www.frontiersin.org/journals/psychology/articles/10.338..."></a></li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this series of essays about representations:



What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



What do neuroscientists mean when they use the term representation? That's part of what Luis Favela and Edouard Machery set out to answer a couple years ago by surveying lots of folks in the cognitive sciences, and they concluded that as a field the term is used in a confused and unclear way. Confused and unclear are technical terms here, and Luis and Edouard explain what they mean in the episode. More recently Luis and Edouard wrote a follow-up piece arguing that maybe it's okay for everyone to use the term in slightly different ways, maybe it helps communication across disciplines, perhaps. My three other guests today, Frances Egan, Rosa Cao, and John Krakauer wrote responses to that argument, and on today's episode all those folks are here to further discuss that issue and why it matters. Luis is a part philosopher, part cognitive scientist at Indiana University Bloomington, Edouard is a philosopher and Director of the Center for Philosophy of Science at the University of Pittsburgh, Frances is a philosopher from Rutgers University, Rosa is a neuroscientist-turned philosopher at Stanford University, and John is a neuroscientist among other things, and co-runs the Brain, Learning, Animation, and Movement Lab at Johns Hopkins.




Luis Favela.

Favela's book: The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment





Edouard Machery.

Machery's book: Doing without Concepts





Frances Egan.

Egan's book: Deflating Mental Representation.





John Krakauer.



Rosa Cao.

Paper mentioned: Putting representations to use.





The exchange, in order, discussed on this episode:

]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 213 Representations in Minds and Brains]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this series of essays about representations:</p>



<p><a href="https://www.thetransmitter.org/defining-representations/what-are-we-talking-about-clarifying-the-fuzzy-concept-of-representation-in-neuroscience-and-beyond/">What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond</a></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>What do neuroscientists mean when they use the term representation? That's part of what <a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a> and <a href="https://www.edouardmachery.com/">Edouard Machery</a> set out to answer a couple years ago by surveying lots of folks in the cognitive sciences, and they concluded that as a field the term is used in a confused and unclear way. Confused and unclear are technical terms here, and Luis and Edouard explain what they mean in the episode. More recently Luis and Edouard wrote a follow-up piece arguing that maybe it's okay for everyone to use the term in slightly different ways, maybe it helps communication across disciplines, perhaps. My three other guests today, <a href="https://frances-egan.org/index.html">Frances Egan</a>, <a href="https://sites.google.com/site/luosha/home/research">Rosa Cao</a>, and <a href="https://www.blam-lab-jhu.org/">John Krakauer</a> wrote responses to that argument, and on today's episode all those folks are here to further discuss that issue and why it matters. Luis is a part philosopher, part cognitive scientist at Indiana University Bloomington, Edouard is a philosopher and Director of the Center for Philosophy of Science at the University of Pittsburgh, Frances is a philosopher from Rutgers University, Rosa is a neuroscientist-turned-philosopher at Stanford University, and John is a neuroscientist among other things, and co-runs the Brain, Learning, Animation, and Movement Lab at Johns Hopkins.</p>



<ul class="wp-block-list">
<li><a href="https://luishfavela.wixsite.com/luishfavela">Luis Favela</a>.
<ul class="wp-block-list">
<li>Favela's book: <a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a></li>
</ul>
</li>



<li><a href="https://www.edouardmachery.com/">Edouard Machery</a>.
<ul class="wp-block-list">
<li>Machery's book: <a href="https://academic.oup.com/book/11923">Doing without Concepts</a></li>
</ul>
</li>



<li><a href="https://frances-egan.org/index.html">Frances Egan</a>.
<ul class="wp-block-list">
<li>Egan's book: <a href="https://amzn.to/4mvsEYs">Deflating Mental Representation</a>.</li>
</ul>
</li>



<li><a href="https://www.blam-lab-jhu.org/">John Krakauer</a>.</li>



<li><a href="https://sites.google.com/site/luosha/home/research">Rosa Cao</a>.
<ul class="wp-block-list">
<li><a href="https://link.springer.com/article/10.1007/s11229-022-03522-3">Paper mentioned: Putting representations to use</a>.</li>
</ul>
</li>



<li>The exchange, in order, discussed on this episode:
<ul class="wp-block-list">
<li><a href="https://www.frontiersin.org/journals/psychology/articles/10.3389/fpsyg.2023.1165622/full">Investigating the concept of representation in the neural and psychological sciences</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/10.1111/mila.12531">The concept of representation in the brain sciences: The current status and ways forward</a>.</li>



<li>Commentaries:
<ul class="wp-block-list">
<li><a href="https://onlinelibrary.wiley.com/doi/10.1111/mila.12535">Assessing the landscape of representational concepts: Commentary on Favela and Machery</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/10.1111/mila.12527">Comments on Favela and Machery's The concept of representation in the brain sciences: The current status and ways forward</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/10.1111/mila.12534">Where did real representations go? Commentary on: The concept of representation in the brain sciences: The current status and ways forward by Favela and Machery</a>.</li>



<li>Reply to commentaries:
<ul class="wp-block-list">
<li><a href="https://onlinelibrary.wiley.com/doi/10.1111/mila.12533">Contextualizing, eliminating, or glossing: What to do with unclear scientific concepts like representation</a>.</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:55 - What is a representation to a neuroscientist?
14:44 - How to deal with the dilemma
21:20 - Opposing views
31:00 - What's at stake?
51:10 - Neural-only representation
1:01:11 - When "representation" is playing a useful role
1:12:56 - The role of a neuroscientist
1:39:35 - The purpose of "representational talk"
1:53:03 - Non-representational mental phenomenon
1:55:53 - Final thoughts</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2057603/c1e-pjzza19r5nu11pmm-z320q3vzij10-0m4naf.mp3" length="123233565"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Check out this series of essays about representations:



What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



What do neuroscientists mean when they use the term representation? That's part of what Luis Favela and Edouard Machery set out to answer a couple years ago by surveying lots of folks in the cognitive sciences, and they concluded that as a field the term is used in a confused and unclear way. Confused and unclear are technical terms here, and Luis and Edouard explain what they mean in the episode. More recently Luis and Edouard wrote a follow-up piece arguing that maybe it's okay for everyone to use the term in slightly different ways, maybe it helps communication across disciplines, perhaps. My three other guests today, Frances Egan, Rosa Cao, and John Krakauer wrote responses to that argument, and on today's episode all those folks are here to further discuss that issue and why it matters. Luis is a part philosopher, part cognitive scientist at Indiana University Bloomington, Edouard is a philosopher and Director of the Center for Philosophy of Science at the University of Pittsburgh, Frances is a philosopher from Rutgers University, Rosa is a neuroscientist-turned-philosopher at Stanford University, and John is a neuroscientist among other things, and co-runs the Brain, Learning, Animation, and Movement Lab at Johns Hopkins.




Luis Favela.

Favela's book: The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment





Edouard Machery.

Machery's book: Doing without Concepts





Frances Egan.

Egan's book: Deflating Mental Representation.





John Krakauer.



Rosa Cao.

Paper mentioned: Putting representations to use.





The exchange, in order, discussed on this episode:

]]>
                </itunes:summary>
                                                                            <itunes:duration>02:07:09</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 212 John Beggs: Why Brains Seek the Edge of Chaos]]>
                </title>
                <pubDate>Wed, 21 May 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2043419</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-212-john-beggs-why-brains-seek-the-edge-of-chaos</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>You may have heard of the critical brain hypothesis. It goes something like this: brain activity operates near a dynamical regime called criticality, poised at the sweet spot between too much order and too much chaos, and this is a good thing because systems at criticality are optimized for computing, they maximize information transfer, they maximize the time range over which they operate, and a handful of other good properties. John Beggs has been studying criticality in brains for over 20 years now. His 2003 paper with Dietmar Plenz is one of the first if not the first to show networks of neurons operating near criticality, and it gets cited in almost every criticality paper I read. John runs the Beggs Lab at Indiana University Bloomington, and a few years ago he literally wrote the book on criticality, called <a href="https://amzn.to/4jJMYD5">The Cortex and the Critical Point: Understanding the Power of Emergence</a>, which I highly recommend as an excellent introduction to the topic, and he continues to work on criticality these days.</p>



<p>On this episode we discuss what criticality is, why and how brains might strive for it, the past and present of how to measure it and why there isn't a consensus on how to measure it, what it means that criticality appears in so many natural systems outside of brains yet we want to say it's a special property of brains. These days John spends plenty of effort defending the criticality hypothesis from critics, so we discuss that, and much more.</p>



<ul class="wp-block-list">
<li><a href="http://www.beggslab.com/">Beggs Lab</a>.</li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/4jJMYD5">The Cortex and the Critical Point: Understanding the Power of Emergence</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.frontiersin.org/journals/computational-neuroscience/articles/10.3389/fncom.2022.703865/full">Addressing skepticism of the critical brain hypothesis</a></li>
</ul>
</li>



<li>Papers John mentioned:
<ul class="wp-block-list">
<li>Tetzlaff et al 2010: <a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1001013">Self-organized criticality in developing neuronal networks.</a></li>



<li>Haldeman and Beggs 2005: <a href="https://pubmed.ncbi.nlm.nih.gov/15783702/">Critical Branching Captures Activity in Living Neural Networks and Maximizes the Number of Metastable States</a>.</li>



<li>Bertschinger et al 2004: <a href="https://proceedings.neurips.cc/paper/2004/hash/f8da71e562ff44a2bc7edf3578c593da-Abstract.html">At the edge of chaos: Real-time computations and self-organized criticality in recurrent neural networks</a>.</li>



<li>Legenstein and Maass 2007: <a href="https://igi-web.tugraz.at/people/maass/psfiles/166.pdf">Edge of chaos and prediction of computational performance for neural circuit models.</a></li>



<li>Kinouchi and Copelli 2006: <a href="https://arxiv.org/abs/q-bio/0601037">Optimal dynamical range of excitable networks at...</a></li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





You may have heard of the critical brain hypothesis. It goes something like this: brain activity operates near a dynamical regime called criticality, poised at the sweet spot between too much order and too much chaos, and this is a good thing because systems at criticality are optimized for computing, they maximize information transfer, they maximize the time range over which they operate, and a handful of other good properties. John Beggs has been studying criticality in brains for over 20 years now. His 2003 paper with Dietmar Plenz is one of the first if not the first to show networks of neurons operating near criticality, and it gets cited in almost every criticality paper I read. John runs the Beggs Lab at Indiana University Bloomington, and a few years ago he literally wrote the book on criticality, called The Cortex and the Critical Point: Understanding the Power of Emergence, which I highly recommend as an excellent introduction to the topic, and he continues to work on criticality these days.



On this episode we discuss what criticality is, why and how brains might strive for it, the past and present of how to measure it and why there isn't a consensus on how to measure it, what it means that criticality appears in so many natural systems outside of brains yet we want to say it's a special property of brains. These days John spends plenty of effort defending the criticality hypothesis from critics, so we discuss that, and much more.




Beggs Lab.



Book:

The Cortex and the Critical Point: Understanding the Power of Emergence





Related papers

Addressing skepticism of the critical brain hypothesis





Papers John mentioned:

Tetzlaff et al 2010: Self-organized criticality in developing neuronal networks.



Haldeman and Beggs 2005: Critical Branching Captures Activity in Living Neural Networks and Maximizes the Number of Metastable States.



Bertschinger et al 2004: At the edge of chaos: Real-time computations and self-organized criticality in recurrent neural networks.



Legenstein and Maass 2007: Edge of chaos and prediction of computational performance for neural circuit models.



Kinouchi and Copelli 2006: Optimal dynamical range of excitable networks at...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 212 John Beggs: Why Brains Seek the Edge of Chaos]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>You may have heard of the critical brain hypothesis. It goes something like this: brain activity operates near a dynamical regime called criticality, poised at the sweet spot between too much order and too much chaos, and this is a good thing because systems at criticality are optimized for computing, they maximize information transfer, they maximize the time range over which they operate, and a handful of other good properties. John Beggs has been studying criticality in brains for over 20 years now. His 2003 paper with Dietmar Plenz is one of the first if not the first to show networks of neurons operating near criticality, and it gets cited in almost every criticality paper I read. John runs the Beggs Lab at Indiana University Bloomington, and a few years ago he literally wrote the book on criticality, called <a href="https://amzn.to/4jJMYD5">The Cortex and the Critical Point: Understanding the Power of Emergence</a>, which I highly recommend as an excellent introduction to the topic, and he continues to work on criticality these days.</p>



<p>On this episode we discuss what criticality is, why and how brains might strive for it, the past and present of how to measure it and why there isn't a consensus on how to measure it, what it means that criticality appears in so many natural systems outside of brains yet we want to say it's a special property of brains. These days John spends plenty of effort defending the criticality hypothesis from critics, so we discuss that, and much more.</p>



<ul class="wp-block-list">
<li><a href="http://www.beggslab.com/">Beggs Lab</a>.</li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/4jJMYD5">The Cortex and the Critical Point: Understanding the Power of Emergence</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.frontiersin.org/journals/computational-neuroscience/articles/10.3389/fncom.2022.703865/full">Addressing skepticism of the critical brain hypothesis</a></li>
</ul>
</li>



<li>Papers John mentioned:
<ul class="wp-block-list">
<li>Tetzlaff et al 2010: <a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1001013">Self-organized criticality in developing neuronal networks.</a></li>



<li>Haldeman and Beggs 2005: <a href="https://pubmed.ncbi.nlm.nih.gov/15783702/">Critical Branching Captures Activity in Living Neural Networks and Maximizes the Number of Metastable States</a>.</li>



<li>Bertschinger et al 2004: <a href="https://proceedings.neurips.cc/paper/2004/hash/f8da71e562ff44a2bc7edf3578c593da-Abstract.html">At the edge of chaos: Real-time computations and self-organized criticality in recurrent neural networks</a>.</li>



<li>Legenstein and Maass 2007: <a href="https://igi-web.tugraz.at/people/maass/psfiles/166.pdf">Edge of chaos and prediction of computational performance for neural circuit models.</a></li>



<li>Kinouchi and Copelli 2006: <a href="https://arxiv.org/abs/q-bio/0601037">Optimal dynamical range of excitable networks at criticality.</a></li>



<li>Chialvo 2010: <a href="https://arxiv.org/abs/1010.2530">Emergent complex neural dynamics.</a></li>



<li>Mora and Bialek 2011: <a href="https://www.princeton.edu/~wbialek/our_papers/mora+bialek_11.pdf">Are Biological Systems Poised at Criticality?</a></li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2025/05/BI-212-transcript-production.pdf" target="_blank" rel="noreferrer noopener">Read the transcript.</a></p>



<p>0:00 - Intro
4:28 - What is criticality?
10:19 - Why is criticality special in brains?
15:34 - Measuring criticality
24:28 - Dynamic range and criticality
28:28 - Criticisms of criticality
31:43 - Current state of critical brain hypothesis
33:34 - Causality and criticality
36:39 - Criticality as a homeostatic set point
38:49 - Is criticality necessary for life?
50:15 - Shooting for criticality far from thermodynamic equilibrium
52:45 - Quasi- and near-criticality
55:03 - Cortex vs. whole brain
58:50 - Structural criticality through development
1:01:09 - Criticality in AI
1:03:56 - Most pressing criticisms of criticality
1:10:08 - Gradients of criticality
1:22:30 - Homeostasis vs. criticality
1:29:57 - Minds and criticality</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2043419/c1e-7kpph92pkwu97z4n-gp31q2j5f61d-zyj6ri.mp3" length="91379108"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





You may have heard of the critical brain hypothesis. It goes something like this: brain activity operates near a dynamical regime called criticality, poised at the sweet spot between too much order and too much chaos, and this is a good thing because systems at criticality are optimized for computing, they maximize information transfer, they maximize the time range over which they operate, and a handful of other good properties. John Beggs has been studying criticality in brains for over 20 years now. His 2003 paper with Dietmar Plenz is one of the first if not the first to show networks of neurons operating near criticality, and it gets cited in almost every criticality paper I read. John runs the Beggs Lab at Indiana University Bloomington, and a few years ago he literally wrote the book on criticality, called The Cortex and the Critical Point: Understanding the Power of Emergence, which I highly recommend as an excellent introduction to the topic, and he continues to work on criticality these days.



On this episode we discuss what criticality is, why and how brains might strive for it, the past and present of how to measure it and why there isn't a consensus on how to measure it, what it means that criticality appears in so many natural systems outside of brains yet we want to say it's a special property of brains. These days John spends plenty of effort defending the criticality hypothesis from critics, so we discuss that, and much more.




Beggs Lab.



Book:

The Cortex and the Critical Point: Understanding the Power of Emergence





Related papers

Addressing skepticism of the critical brain hypothesis





Papers John mentioned:

Tetzlaff et al 2010: Self-organized criticality in developing neuronal networks.



Haldeman and Beggs 2005: Critical Branching Captures Activity in Living Neural Networks and Maximizes the Number of Metastable States.



Bertschinger et al 2004: At the edge of chaos: Real-time computations and self-organized criticality in recurrent neural networks.



Legenstein and Maass 2007: Edge of chaos and prediction of computational performance for neural circuit models.



Kinouchi and Copelli 2006: Optimal dynamical range of excitable networks at...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:33:34</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 211 COGITATE: Testing Theories of Consciousness]]>
                </title>
                <pubDate>Wed, 07 May 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2025990</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-211-cogitate-testing-theories-of-consciousness</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p><a href="https://en-sagol.tau.ac.il/Rony_Hirschhorn">Rony Hirschhorn</a>, <a href="https://alexlepauvre.github.io/AlexLepauvre/">Alex Lepauvre</a>, and <a href="https://www.birmingham.ac.uk/staff/profiles/psychology/ferrante-oscar">Oscar Ferrante</a> are three of many many scientists that comprise the <a href="https://www.arc-cogitate.com/">COGITATE</a> group. COGITATE is an adversarial collaboration project to test theories of consciousness in humans, in this case testing the integrated information theory of consciousness and the global neuronal workspace theory of consciousness. I said it's an adversarial collaboration, so what does that mean. It's adversarial in that two theories of consciousness are being pitted against each other. It's a collaboration in that the proponents of the two theories had to agree on what experiments could be performed that could possibly falsify the claims of either theory. The group has just published the results of the first round of experiments in a paper titled <a href="https://www.nature.com/articles/s41586-025-08888-1">Adversarial testing of global neuronal workspace and integrated information theories of consciousness</a>, and this is what Rony, Alex, and Oscar discuss with me today.</p>



<p>The short summary is that they used a simple task and measured brain activity with three different methods: EEG, MEG, and fMRI, and made predictions about where in the brain correlates of consciousness should be, how that activity should be maintained over time, and what kind of functional connectivity patterns should be present between brain regions. The take home is a mixed bag, with neither theory being fully falsified, but with a ton of data and results for the world to ponder and build on, to hopefully continue to refine and develop theoretical accounts of how brains and consciousness are related.</p>



<p>So we discuss the project itself, many of the challenges they faced, their experiences and reflections working on it and on coming together as a team, the nature of working on an adversarial collaboration, when so much is at stake for the proponents of each theory, and, as you heard last episode with <a href="https://braininspired.co/podcast/210/">Dean Buonomano</a>, when one of the theories, IIT, is surrounded by a bit of controversy itself regarding whether it should even be considered a scientific theory.</p>



<ul class="wp-block-list">
<li><a href="https://www.arc-cogitate.com/">COGITATE</a>.</li>



<li><a href="https://www.birmingham.ac.uk/staff/profiles/psychology/ferrante-oscar">Oscar Ferrante</a>. <a href="https://x.com/ferrante_oscar">@ferrante_oscar</a></li>



<li><a href="https://en-sagol.tau.ac.il/Rony_Hirschhorn">Rony Hirschhorn</a>. <a href="https://x.com/RonyHirsch">@RonyHirsch</a></li>



<li><a href="https://alexlepauvre.github.io/AlexLepauvre/">Alex Lepauvre</a>. <a href="https://x.com/lepauvrealex">@LepauvreAlex</a></li>



<li>Paper: <a href="https://www.nature.com/articles/s41586-025-08888-1">Adversarial testing of global neur...</a></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Rony Hirschhorn, Alex Lepauvre, and Oscar Ferrante are three of many many scientists that comprise the COGITATE group. COGITATE is an adversarial collaboration project to test theories of consciousness in humans, in this case testing the integrated information theory of consciousness and the global neuronal workspace theory of consciousness. I said it's an adversarial collaboration, so what does that mean. It's adversarial in that two theories of consciousness are being pitted against each other. It's a collaboration in that the proponents of the two theories had to agree on what experiments could be performed that could possibly falsify the claims of either theory. The group has just published the results of the first round of experiments in a paper titled Adversarial testing of global neuronal workspace and integrated information theories of consciousness, and this is what Rony, Alex, and Oscar discuss with me today.



The short summary is that they used a simple task and measured brain activity with three different methods: EEG, MEG, and fMRI, and made predictions about where in the brain correlates of consciousness should be, how that activity should be maintained over time, and what kind of functional connectivity patterns should be present between brain regions. The take home is a mixed bag, with neither theory being fully falsified, but with a ton of data and results for the world to ponder and build on, to hopefully continue to refine and develop theoretical accounts of how brains and consciousness are related.



So we discuss the project itself, many of the challenges they faced, their experiences and reflections working on it and on coming together as a team, the nature of working on an adversarial collaboration, when so much is at stake for the proponents of each theory, and, as you heard last episode with Dean Buonomano, when one of the theories, IIT, is surrounded by a bit of controversy itself regarding whether it should even be considered a scientific theory.




COGITATE.



Oscar Ferrante. @ferrante_oscar



Rony Hirschhorn. @RonyHirsch



Alex Lepauvre. @LepauvreAlex



Paper: Adversarial testing of global neur...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 211 COGITATE: Testing Theories of Consciousness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">Brain Inspired email alerts</a> to be notified every time a new Brain Inspired episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p><a href="https://en-sagol.tau.ac.il/Rony_Hirschhorn">Rony Hirschhorn</a>, <a href="https://alexlepauvre.github.io/AlexLepauvre/">Alex Lepauvre</a>, and <a href="https://www.birmingham.ac.uk/staff/profiles/psychology/ferrante-oscar">Oscar Ferrante</a> are three of many many scientists that comprise the <a href="https://www.arc-cogitate.com/">COGITATE</a> group. COGITATE is an adversarial collaboration project to test theories of consciousness in humans, in this case testing the integrated information theory of consciousness and the global neuronal workspace theory of consciousness. I said it's an adversarial collaboration, so what does that mean. It's adversarial in that two theories of consciousness are being pitted against each other. It's a collaboration in that the proponents of the two theories had to agree on what experiments could be performed that could possibly falsify the claims of either theory. The group has just published the results of the first round of experiments in a paper titled <a href="https://www.nature.com/articles/s41586-025-08888-1">Adversarial testing of global neuronal workspace and integrated information theories of consciousness</a>, and this is what Rony, Alex, and Oscar discuss with me today.</p>



<p>The short summary is that they used a simple task and measured brain activity with three different methods: EEG, MEG, and fMRI, and made predictions about where in the brain correlates of consciousness should be, how that activity should be maintained over time, and what kind of functional connectivity patterns should be present between brain regions. The take home is a mixed bag, with neither theory being fully falsified, but with a ton of data and results for the world to ponder and build on, to hopefully continue to refine and develop theoretical accounts of how brains and consciousness are related.</p>



<p>So we discuss the project itself, many of the challenges they faced, their experiences and reflections working on it and on coming together as a team, the nature of working on an adversarial collaboration, when so much is at stake for the proponents of each theory, and, as you heard last episode with <a href="https://braininspired.co/podcast/210/">Dean Buonomano</a>, when one of the theories, IIT, is surrounded by a bit of controversy itself regarding whether it should even be considered a scientific theory.</p>



<ul class="wp-block-list">
<li><a href="https://www.arc-cogitate.com/">COGITATE</a>.</li>



<li><a href="https://www.birmingham.ac.uk/staff/profiles/psychology/ferrante-oscar">Oscar Ferrante</a>. <a href="https://x.com/ferrante_oscar">@ferrante_oscar</a></li>



<li><a href="https://en-sagol.tau.ac.il/Rony_Hirschhorn">Rony Hirschhorn</a>. <a href="https://x.com/RonyHirsch">@RonyHirsch</a></li>



<li><a href="https://alexlepauvre.github.io/AlexLepauvre/">Alex Lepauvre</a>. <a href="https://x.com/lepauvrealex">@LepauvreAlex</a></li>



<li>Paper: <a href="https://www.nature.com/articles/s41586-025-08888-1">Adversarial testing of global neuronal workspace and integrated information theories of consciousness</a>.</li>



<li><a href="https://braininspired.co/podcast/210/">BI 210 Dean Buonomano: Consciousness, Time, and Organotypic Dynamics</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/05/Brain-Inspired-211-transcript-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
4:00 - COGITATE
17:42 - How the experiments were developed
32:37 - How data was collected and analyzed
41:24 - Prediction 1: Where is consciousness?
47:51 - The experimental task
1:00:14 - Prediction 2: Duration of consciousness-related activity
1:18:37 - Prediction 3: Inter-areal communication
1:28:28 - Big picture of the results
1:44:25 - Moving forward</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2025990/c1e-6wvvcoz3qofwqzm5-rk4o3mq7un04-ebmlp1.mp3" length="115845022"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for Brain Inspired email alerts to be notified every time a new Brain Inspired episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Rony Hirschhorn, Alex Lepauvre, and Oscar Ferrante are three of many many scientists that comprise the COGITATE group. COGITATE is an adversarial collaboration project to test theories of consciousness in humans, in this case testing the integrated information theory of consciousness and the global neuronal workspace theory of consciousness. I said it's an adversarial collaboration, so what does that mean. It's adversarial in that two theories of consciousness are being pitted against each other. It's a collaboration in that the proponents of the two theories had to agree on what experiments could be performed that could possibly falsify the claims of either theory. The group has just published the results of the first round of experiments in a paper titled Adversarial testing of global neuronal workspace and integrated information theories of consciousness, and this is what Rony, Alex, and Oscar discuss with me today.



The short summary is that they used a simple task and measured brain activity with three different methods: EEG, MEG, and fMRI, and made predictions about where in the brain correlates of consciousness should be, how that activity should be maintained over time, and what kind of functional connectivity patterns should be present between brain regions. The take home is a mixed bag, with neither theory being fully falsified, but with a ton of data and results for the world to ponder and build on, to hopefully continue to refine and develop theoretical accounts of how brains and consciousness are related.



So we discuss the project itself, many of the challenges they faced, their experiences and reflections working on it and on coming together as a team, the nature of working on an adversarial collaboration, when so much is at stake for the proponents of each theory, and, as you heard last episode with Dean Buonomano, when one of the theories, IIT, is surrounded by a bit of controversy itself regarding whether it should even be considered a scientific theory.




COGITATE.



Oscar Ferrante. @ferrante_oscar



Rony Hirschhorn. @RonyHirsch



Alex Lepauvre. @LepauvreAlex



Paper: Adversarial testing of global neur...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:59:40</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 210 Dean Buonomano: Consciousness, Time, and Organotypic Dynamics]]>
                </title>
                <pubDate>Tue, 22 Apr 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2018176</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-210-dean-buonomano-consciousness-time-and-organotypic-dynamics-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Dean Buonomano runs the Buonomano lab at UCLA. Dean was a guest on Brain Inspired way back on episode 18, where we talked about his book <a href="https://amzn.to/42hYfog">Your Brain is a Time Machine: The Neuroscience and Physics of Time</a>, which details much of his thought and research about how centrally important time is for virtually everything we do, different conceptions of time in philosophy, and how brains might tell time. That was almost 7 years ago, and his work on time and dynamics in computational neuroscience continues.</p>



<p>One thing we discuss today, later in the episode, is his recent work using organotypic brain slices to test the idea that cortical circuits implement timing as a computational primitive — it's something they do by their very nature. Organotypic brain slices are between what I think of as traditional brain slices and full-on organoids. Brain slices are extracted from an organism, and maintained in a brain-like fluid while you perform experiments on them. Organoids start with a small number of cells that you then culture, and let them divide and grow and specialize, until you have a mass of cells that have grown into an organ of some sort, to then perform experiments on. Organotypic brain slices are extracted from an organism, like brain slices, but then also cultured for some time to let them settle back into some sort of near-homeostatic point - to get them as close as you can to what they're like in the intact brain... then perform experiments on them. Dean and his colleagues use optogenetics to train their brain slices to predict the timing of the stimuli, and they find the populations of neurons do indeed learn to predict the timing of the stimuli, and that they exhibit replaying of those sequences similar to the replay seen in brain areas like the hippocampus.</p>



<p>But, we begin our conversation talking about Dean's recent piece in The Transmitter, that I'll point to in the show notes, called <a href="https://www.thetransmitter.org/neuroai/the-brain-holds-no-exclusive-rights-on-how-to-create-intelligence/">The brain holds no exclusive rights on how to create intelligence</a>. There he argues that modern AI is likely to continue its recent successes despite the ongoing divergence between AI and neuroscience. This is in contrast to what folks in NeuroAI believe.</p>



<p>We then talk about his recent chapter with physicist Carlo Rovelli, titled <a href="https://www.buonomanolab.com/Publications/BuonomanoRovelli_BridgingNeurosciencePhysicsTime_TimeScience_2023.pdf">Bridging the neuroscience and physics of time</a>, in which Dean and Carlo examine where neuroscience and physics disagree and where they agree about the nature of time.</p>



<p>Finally, we discuss Dean's thoughts on the integrated information theory of consciousness, or IIT. IIT has seen a little controversy lately. Over 100 scientists, a large part of that group calling themselves IIT-Concerned, have expressed concern that IIT is actually unscientific. This has caused backlash and anti-backlash, and all sorts of fun expression from many interested people. Dean explains his own views about why he thinks IIT is not in the purview of science - namely that it doesn't play well with the existing ontology of what physics says about science. What I just said doesn't do justice to his arguments, which he articulates much better. </p>



<ul class="wp-block-list">
<li><a href="https://www.buonomanolab.com/">Buonomano lab</a>.</li>



<li>Twitter: <a href="https://x.com/DeanBuono">@DeanBuono</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/neuroai/the-brain-holds-no-exclusive-rights-on-how-to-create-intelligence/">The brain...</a></li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Dean Buonomano runs the Buonomano lab at UCLA. Dean was a guest on Brain Inspired way back on episode 18, where we talked about his book Your Brain is a Time Machine: The Neuroscience and Physics of Time, which details much of his thought and research about how centrally important time is for virtually everything we do, different conceptions of time in philosophy, and how brains might tell time. That was almost 7 years ago, and his work on time and dynamics in computational neuroscience continues.



One thing we discuss today, later in the episode, is his recent work using organotypic brain slices to test the idea that cortical circuits implement timing as a computational primitive — it's something they do by their very nature. Organotypic brain slices are between what I think of as traditional brain slices and full-on organoids. Brain slices are extracted from an organism, and maintained in a brain-like fluid while you perform experiments on them. Organoids start with a small number of cells that you then culture, and let them divide and grow and specialize, until you have a mass of cells that have grown into an organ of some sort, to then perform experiments on. Organotypic brain slices are extracted from an organism, like brain slices, but then also cultured for some time to let them settle back into some sort of near-homeostatic point - to get them as close as you can to what they're like in the intact brain... then perform experiments on them. Dean and his colleagues use optogenetics to train their brain slices to predict the timing of the stimuli, and they find the populations of neurons do indeed learn to predict the timing of the stimuli, and that they exhibit replaying of those sequences similar to the replay seen in brain areas like the hippocampus.



But, we begin our conversation talking about Dean's recent piece in The Transmitter, that I'll point to in the show notes, called The brain holds no exclusive rights on how to create intelligence. There he argues that modern AI is likely to continue its recent successes despite the ongoing divergence between AI and neuroscience. This is in contrast to what folks in NeuroAI believe.



We then talk about his recent chapter with physicist Carlo Rovelli, titled Bridging the neuroscience and physics of time, in which Dean and Carlo examine where neuroscience and physics disagree and where they agree about the nature of time.



Finally, we discuss Dean's thoughts on the integrated information theory of consciousness, or IIT. IIT has seen a little controversy lately. Over 100 scientists, a large part of that group calling themselves IIT-Concerned, have expressed concern that IIT is actually unscientific. This has caused backlash and anti-backlash, and all sorts of fun expression from many interested people. Dean explains his own views about why he thinks IIT is not in the purview of science - namely that it doesn't play well with the existing ontology of what physics says about science. What I just said doesn't do justice to his arguments, which he articulates much better. 




Buonomano lab.



Twitter: @DeanBuono.



Related papers

The brain...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 210 Dean Buonomano: Consciousness, Time, and Organotypic Dynamics]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Dean Buonomano runs the Buonomano lab at UCLA. Dean was a guest on Brain Inspired way back on episode 18, where we talked about his book <a href="https://amzn.to/42hYfog">Your Brain is a Time Machine: The Neuroscience and Physics of Time</a>, which details much of his thought and research about how centrally important time is for virtually everything we do, different conceptions of time in philosophy, and how brains might tell time. That was almost 7 years ago, and his work on time and dynamics in computational neuroscience continues.</p>



<p>One thing we discuss today, later in the episode, is his recent work using organotypic brain slices to test the idea that cortical circuits implement timing as a computational primitive — it's something they do by their very nature. Organotypic brain slices are between what I think of as traditional brain slices and full-on organoids. Brain slices are extracted from an organism, and maintained in a brain-like fluid while you perform experiments on them. Organoids start with a small number of cells that you then culture, and let them divide and grow and specialize, until you have a mass of cells that have grown into an organ of some sort, to then perform experiments on. Organotypic brain slices are extracted from an organism, like brain slices, but then also cultured for some time to let them settle back into some sort of near-homeostatic point - to get them as close as you can to what they're like in the intact brain... then perform experiments on them. Dean and his colleagues use optogenetics to train their brain slices to predict the timing of the stimuli, and they find the populations of neurons do indeed learn to predict the timing of the stimuli, and that they exhibit replaying of those sequences similar to the replay seen in brain areas like the hippocampus.</p>



<p>But, we begin our conversation talking about Dean's recent piece in The Transmitter, that I'll point to in the show notes, called <a href="https://www.thetransmitter.org/neuroai/the-brain-holds-no-exclusive-rights-on-how-to-create-intelligence/">The brain holds no exclusive rights on how to create intelligence</a>. There he argues that modern AI is likely to continue its recent successes despite the ongoing divergence between AI and neuroscience. This is in contrast to what folks in NeuroAI believe.</p>



<p>We then talk about his recent chapter with physicist Carlo Rovelli, titled <a href="https://www.buonomanolab.com/Publications/BuonomanoRovelli_BridgingNeurosciencePhysicsTime_TimeScience_2023.pdf">Bridging the neuroscience and physics of time</a>, in which Dean and Carlo examine where neuroscience and physics disagree and where they agree about the nature of time.</p>



<p>Finally, we discuss Dean's thoughts on the integrated information theory of consciousness, or IIT. IIT has seen a little controversy lately. Over 100 scientists, a large part of that group calling themselves IIT-Concerned, have expressed concern that IIT is actually unscientific. This has caused backlash and anti-backlash, and all sorts of fun expression from many interested people. Dean explains his own views about why he thinks IIT is not in the purview of science - namely that it doesn't play well with the existing ontology of what physics says about science. What I just said doesn't do justice to his arguments, which he articulates much better. </p>



<ul class="wp-block-list">
<li><a href="https://www.buonomanolab.com/">Buonomano lab</a>.</li>



<li>Twitter: <a href="https://x.com/DeanBuono">@DeanBuono</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/neuroai/the-brain-holds-no-exclusive-rights-on-how-to-create-intelligence/">The brain holds no exclusive rights on how to create intelligence</a>.</li>



<li><a href="https://www.nature.com/articles/s41593-025-01881-x">What makes a theory of consciousness unscientific?</a></li>



<li><a href="https://www.nature.com/articles/s41467-025-58013-z">Ex vivo cortical circuits learn to predict and spontaneously replay temporal patterns</a>.</li>



<li><a href="https://www.buonomanolab.com/Publications/BuonomanoRovelli_BridgingNeurosciencePhysicsTime_TimeScience_2023.pdf">Bridging the neuroscience and physics of time</a>.</li>
</ul>
</li>



<li><a href="https://braininspired.co/podcast/204/">BI 204 David Robbe: Your Brain Doesn’t Measure Time</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/04/Brain-inspired-210-transcript-dean-buonomano-final.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
8:49 - AI doesn't need biology
17:52 - Time in physics and in neuroscience
34:04 - Integrated information theory
1:01:34 - Global neuronal workspace theory
1:07:46 - Organotypic slices and predictive processing
1:26:07 - Do brains actually measure time? David Robbe</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2018176/c1e-q488idn5mwck770g-6zo3mv74f529-kxpv7a.mp3" length="106907242"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Dean Buonomano runs the Buonomano lab at UCLA. Dean was a guest on Brain Inspired way back on episode 18, where we talked about his book Your Brain is a Time Machine: The Neuroscience and Physics of Time, which details much of his thought and research about how centrally important time is for virtually everything we do, different conceptions of time in philosophy, and how brains might tell time. That was almost 7 years ago, and his work on time and dynamics in computational neuroscience continues.



One thing we discuss today, later in the episode, is his recent work using organotypic brain slices to test the idea that cortical circuits implement timing as a computational primitive — it's something they do by their very nature. Organotypic brain slices are between what I think of as traditional brain slices and full-on organoids. Brain slices are extracted from an organism, and maintained in a brain-like fluid while you perform experiments on them. Organoids start with a small number of cells that you then culture, and let them divide and grow and specialize, until you have a mass of cells that have grown into an organ of some sort, to then perform experiments on. Organotypic brain slices are extracted from an organism, like brain slices, but then also cultured for some time to let them settle back into some sort of near-homeostatic point - to get them as close as you can to what they're like in the intact brain... then perform experiments on them. Dean and his colleagues use optogenetics to train their brain slices to predict the timing of the stimuli, and they find the populations of neurons do indeed learn to predict the timing of the stimuli, and that they exhibit replaying of those sequences similar to the replay seen in brain areas like the hippocampus.



But, we begin our conversation talking about Dean's recent piece in The Transmitter, that I'll point to in the show notes, called The brain holds no exclusive rights on how to create intelligence. There he argues that modern AI is likely to continue its recent successes despite the ongoing divergence between AI and neuroscience. This is in contrast to what folks in NeuroAI believe.



We then talk about his recent chapter with physicist Carlo Rovelli, titled Bridging the neuroscience and physics of time, in which Dean and Carlo examine where neuroscience and physics disagree and where they agree about the nature of time.



Finally, we discuss Dean's thoughts on the integrated information theory of consciousness, or IIT. IIT has seen a little controversy lately. Over 100 scientists, a large part of that group calling themselves IIT-Concerned, have expressed concern that IIT is actually unscientific. This has caused backlash and anti-backlash, and all sorts of fun expression from many interested people. Dean explains his own views about why he thinks IIT is not in the purview of science - namely that it doesn't play well with the existing ontology of what physics says about science. What I just said doesn't do justice to his arguments, which he articulates much better. 




Buonomano lab.



Twitter: @DeanBuono.



Related papers

The brain...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:50:33</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 209 Aran Nayebi: The NeuroAI Turing Test]]>
                </title>
                <pubDate>Wed, 09 Apr 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/2009901</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-209-aran-nayebi-the-neuroai-turing-test-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the “Brain Inspired” <a href="https://www.thetransmitter.org/newsletters/">email alerts</a> to be notified every time a new “Brain Inspired” episode is released<a href="https://www.thetransmitter.org/newsletters/">.</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Aran Nayebi is an Assistant Professor at Carnegie Mellon University in the Machine Learning Department. He was there in the early days of using convolutional neural networks to explain how our brains perform object recognition, and since then he's had a whirlwind trajectory through different AI architectures and algorithms and how they relate to biological architectures and algorithms, so we touch on some of what he has studied in that regard. But he also recently started his own lab, at CMU, and he has plans to integrate much of what he has learned to eventually develop autonomous agents that perform the tasks we want them to perform in at least similar ways to how our brains perform them. So we discuss his ongoing plans to reverse-engineer our intelligence to build useful cognitive architectures of that sort.</p>



<p>We also discuss Aran's suggestion that, at least in the NeuroAI world, the Turing test needs to be updated to include some measure of similarity of the internal representations used to achieve the various tasks the models perform. By internal representations, as we discuss, he means the population-level activity in the neural networks, not the mental representations philosophy of mind often refers to, or other philosophical notions of the term representation.</p>



<ul class="wp-block-list">
<li><a href="https://anayebi.github.io/">Aran's Website</a>.</li>



<li>Twitter: <a href="https://www.twitter.com/aran_nayebi">@aran_nayebi</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2502.16238">Brain-model evaluations need the NeuroAI Turing Test</a>.</li>



<li><a href="https://arxiv.org/abs/2502.05934">Barriers and pathways to human-AI alignment: a game-theoretic approach</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:24 - Background
20:46 - Building embodied agents
33:00 - Adaptability
49:25 - Marr's levels
54:12 - Sensorimotor loop and intrinsic goals
1:00:05 - NeuroAI Turing Test
1:18:18 - Representations
1:28:18 - How to know what to measure
1:32:56 - AI safety</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Aran Nayebi is an Assistant Professor at Carnegie Mellon University in the Machine Learning Department. He was there in the early days of using convolutional neural networks to explain how our brains perform object recognition, and since then he's had a whirlwind trajectory through different AI architectures and algorithms and how they relate to biological architectures and algorithms, so we touch on some of what he has studied in that regard. But he also recently started his own lab, at CMU, and he has plans to integrate much of what he has learned to eventually develop autonomous agents that perform the tasks we want them to perform in at least similar ways to how our brains perform them. So we discuss his ongoing plans to reverse-engineer our intelligence to build useful cognitive architectures of that sort.



We also discuss Aran's suggestion that, at least in the NeuroAI world, the Turing test needs to be updated to include some measure of similarity of the internal representations used to achieve the various tasks the models perform. By internal representations, as we discuss, he means the population-level activity in the neural networks, not the mental representations philosophy of mind often refers to, or other philosophical notions of the term representation.




Aran's Website.



Twitter: @aran_nayebi.



Related papers

Brain-model evaluations need the NeuroAI Turing Test.



Barriers and pathways to human-AI alignment: a game-theoretic approach.






0:00 - Intro
5:24 - Background
20:46 - Building embodied agents
33:00 - Adaptability
49:25 - Marr's levels
54:12 - Sensorimotor loop and intrinsic goals
1:00:05 - NeuroAI Turing Test
1:18:18 - Representations
1:28:18 - How to know what to measure
1:32:56 - AI safety]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 209 Aran Nayebi: The NeuroAI Turing Test]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the “Brain Inspired” <a href="https://www.thetransmitter.org/newsletters/">email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Aran Nayebi is an Assistant Professor at Carnegie Mellon University in the Machine Learning Department. He was there in the early days of using convolutional neural networks to explain how our brains perform object recognition, and since then he's had a whirlwind trajectory through different AI architectures and algorithms and how they relate to biological architectures and algorithms, so we touch on some of what he has studied in that regard. But he also recently started his own lab, at CMU, and he has plans to integrate much of what he has learned to eventually develop autonomous agents that perform the tasks we want them to perform in at least similar ways to how our brains perform them. So we discuss his ongoing plans to reverse-engineer our intelligence to build useful cognitive architectures of that sort.</p>



<p>We also discuss Aran's suggestion that, at least in the NeuroAI world, the Turing test needs to be updated to include some measure of similarity of the internal representations used to achieve the various tasks the models perform. By internal representations, as we discuss, he means the population-level activity in the neural networks, not the mental representations philosophy of mind often refers to, or other philosophical notions of the term representation.</p>



<ul class="wp-block-list">
<li><a href="https://anayebi.github.io/">Aran's Website</a>.</li>



<li>Twitter: <a href="https://www.twitter.com/aran_nayebi">@aran_nayebi</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2502.16238">Brain-model evaluations need the NeuroAI Turing Test</a>.</li>



<li><a href="https://arxiv.org/abs/2502.05934">Barriers and pathways to human-AI alignment: a game-theoretic approach</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:24 - Background
20:46 - Building embodied agents
33:00 - Adaptability
49:25 - Marr's levels
54:12 - Sensorimotor loop and intrinsic goals
1:00:05 - NeuroAI Turing Test
1:18:18 - Representations
1:28:18 - How to know what to measure
1:32:56 - AI safety</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2009901/c1e-vzkkt7jkkoco8oqx-47k11xdvtk8x-eazxpm.mp3" length="100790804"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Aran Nayebi is an Assistant Professor at Carnegie Mellon University in the Machine Learning Department. He was there in the early days of using convolutional neural networks to explain how our brains perform object recognition, and since then he's had a whirlwind trajectory through different AI architectures and algorithms and how they relate to biological architectures and algorithms, so we touch on some of what he has studied in that regard. But he also recently started his own lab, at CMU, and he has plans to integrate much of what he has learned to eventually develop autonomous agents that perform the tasks we want them to perform in at least similar ways to how our brains perform them. So we discuss his ongoing plans to reverse-engineer our intelligence to build useful cognitive architectures of that sort.



We also discuss Aran's suggestion that, at least in the NeuroAI world, the Turing test needs to be updated to include some measure of similarity of the internal representations used to achieve the various tasks the models perform. By internal representations, as we discuss, he means the population-level activity in the neural networks, not the mental representations philosophy of mind often refers to, or other philosophical notions of the term representation.




Aran's Website.



Twitter: @aran_nayebi.



Related papers

Brain-model evaluations need the NeuroAI Turing Test.



Barriers and pathways to human-AI alignment: a game-theoretic approach.






0:00 - Intro
5:24 - Background
20:46 - Building embodied agents
33:00 - Adaptability
49:25 - Marr's levels
54:12 - Sensorimotor loop and intrinsic goals
1:00:05 - NeuroAI Turing Test
1:18:18 - Representations
1:28:18 - How to know what to measure
1:32:56 - AI safety]]>
                </itunes:summary>
                                                                            <itunes:duration>01:43:59</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 208 Gabriele Scheler: From Verbal Thought to Neuron Computation]]>
                </title>
                <pubDate>Wed, 26 Mar 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1999568</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-208-gabriele-scheler-from-verbal-thought-to-neuron-computation</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Gabriele Scheler co-founded the <a href="https://www.theoretical-biology.org/">Carl Correns Foundation for Mathematical Biology</a>. Carl Correns was her great grandfather, one of the early pioneers in genetics. Gabriele is a computational neuroscientist, whose goal is to build models of cellular computation, and much of her focus is on neurons.</p>



<p>We discuss her theoretical work building a new kind of single neuron model. She, like <a href="https://braininspired.co/podcast/205/">Dmitri Chklovskii</a> a few episodes ago, believes we've been stuck with essentially the same family of models for a neuron for a long time, despite minor variations on those models. The model Gabriele is working on, for example, respects the computations going on not only externally, via spiking, which has been the only game in town forever, but also the computations going on within the cell itself. Gabriele is in line with previous guests like <a href="https://braininspired.co/podcast/126/">Randy Gallistel</a>, <a href="https://braininspired.co/podcast/172/">David Glanzman</a>, and <a href="https://braininspired.co/podcast/199/">Hessam Akhlaghpour</a>, who argue that we need to pay attention to how neurons are computing various things internally and how that affects our cognition. Gabriele also believes the new neuron model she's developing will improve AI, drastically simplifying the models by providing them with smarter neurons, essentially.</p>



<p>We also discuss the importance of neuromodulation, her interest in wanting to understand how we think via our internal verbal monologue, her lifelong interest in language in general, what she thinks about LLMs, why she decided to start her own foundation to fund her science, what that experience has been like so far. Gabriele has been working on these topics for many years, and as you'll hear in a moment, she was there when computational neuroscience was just starting to pop up in a few places, when it was a nascent field, unlike its current ubiquity in neuroscience.</p>



<ul class="wp-block-list">
<li><a href="https://gabriele-scheler.mystrikingly.com/">Gabriele's website</a>.</li>



<li><a href="https://www.theoretical-biology.org/">Carl Correns Foundation for Mathematical Biology</a>.
<ul class="wp-block-list">
<li><a href="https://braincentric.ai">Neuro-AI spinoff</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2209.06865">Sketch of a novel approach to a neural model</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/658153v5">Localist neural plasticity identified by mutual information</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/199/">BI 199 Hessam Akhlaghpour: Natural Universal Computation</a></li>



<li><a href="https://braininspired.co/podcast/172/">BI 172 David Glanzman: Memory All The Way Down</a></li>



<li><a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:41 - Gabriele's early interests in verbal thinking
14:14 - What is thinking?
24:04 - Starting one's own foundation
58:18 - Building a new single neuron model
1:19:25 - The right level of abstraction
1:25:00 - How a new neuron would change AI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Gabriele Scheler co-founded the Carl Correns Foundation for Mathematical Biology. Carl Correns was her great grandfather, one of the early pioneers in genetics. Gabriele is a computational neuroscientist, whose goal is to build models of cellular computation, and much of her focus is on neurons.



We discuss her theoretical work building a new kind of single neuron model. She, like Dmitri Chklovskii a few episodes ago, believes we've been stuck with essentially the same family of models for a neuron for a long time, despite minor variations on those models. The model Gabriele is working on, for example, respects the computations going on not only externally, via spiking, which has been the only game in town forever, but also the computations going on within the cell itself. Gabriele is in line with previous guests like Randy Gallistel, David Glanzman, and Hessam Akhlaghpour, who argue that we need to pay attention to how neurons are computing various things internally and how that affects our cognition. Gabriele also believes the new neuron model she's developing will improve AI, drastically simplifying the models by providing them with smarter neurons, essentially.



We also discuss the importance of neuromodulation, her interest in wanting to understand how we think via our internal verbal monologue, her lifelong interest in language in general, what she thinks about LLMs, why she decided to start her own foundation to fund her science, what that experience has been like so far. Gabriele has been working on these topics for many years, and as you'll hear in a moment, she was there when computational neuroscience was just starting to pop up in a few places, when it was a nascent field, unlike its current ubiquity in neuroscience.




Gabriele's website.



Carl Correns Foundation for Mathematical Biology.

Neuro-AI spinoff





Related papers

Sketch of a novel approach to a neural model.



Localist neural plasticity identified by mutual information.





Related episodes

BI 199 Hessam Akhlaghpour: Natural Universal Computation



BI 172 David Glanzman: Memory All The Way Down



BI 126 Randy Gallistel: Where Is the Engram?






0:00 - Intro
4:41 - Gabriele's early interests in verbal thinking
14:14 - What is thinking?
24:04 - Starting one's own foundation
58:18 - Building a new single neuron model
1:19:25 - The right level of abstraction
1:25:00 - How a new neuron would change AI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 208 Gabriele Scheler: From Verbal Thought to Neuron Computation]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Gabriele Scheler co-founded the <a href="https://www.theoretical-biology.org/">Carl Correns Foundation for Mathematical Biology</a>. Carl Correns was her great grandfather, one of the early pioneers in genetics. Gabriele is a computational neuroscientist, whose goal is to build models of cellular computation, and much of her focus is on neurons.</p>



<p>We discuss her theoretical work building a new kind of single neuron model. She, like <a href="https://braininspired.co/podcast/205/">Dmitri Chklovskii</a> a few episodes ago, believes we've been stuck with essentially the same family of models for a neuron for a long time, despite minor variations on those models. The model Gabriele is working on, for example, respects the computations going on not only externally, via spiking, which has been the only game in town forever, but also the computations going on within the cell itself. Gabriele is in line with previous guests like <a href="https://braininspired.co/podcast/126/">Randy Gallistel</a>, <a href="https://braininspired.co/podcast/172/">David Glanzman</a>, and <a href="https://braininspired.co/podcast/199/">Hessam Akhlaghpour</a>, who argue that we need to pay attention to how neurons are computing various things internally and how that affects our cognition. Gabriele also believes the new neuron model she's developing will improve AI, drastically simplifying the models by providing them with smarter neurons, essentially.</p>



<p>We also discuss the importance of neuromodulation, her interest in wanting to understand how we think via our internal verbal monologue, her lifelong interest in language in general, what she thinks about LLMs, why she decided to start her own foundation to fund her science, what that experience has been like so far. Gabriele has been working on these topics for many years, and as you'll hear in a moment, she was there when computational neuroscience was just starting to pop up in a few places, when it was a nascent field, unlike its current ubiquity in neuroscience.</p>



<ul class="wp-block-list">
<li><a href="https://gabriele-scheler.mystrikingly.com/">Gabriele's website</a>.</li>



<li><a href="https://www.theoretical-biology.org/">Carl Correns Foundation for Mathematical Biology</a>.
<ul class="wp-block-list">
<li><a href="https://braincentric.ai">Neuro-AI spinoff</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2209.06865">Sketch of a novel approach to a neural model</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/658153v5">Localist neural plasticity identified by mutual information</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/199/">BI 199 Hessam Akhlaghpour: Natural Universal Computation</a></li>



<li><a href="https://braininspired.co/podcast/172/">BI 172 David Glanzman: Memory All The Way Down</a></li>



<li><a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:41 - Gabriele's early interests in verbal thinking
14:14 - What is thinking?
24:04 - Starting one's own foundation
58:18 - Building a new single neuron model
1:19:25 - The right level of abstraction
1:25:00 - How a new neuron would change AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1999568/c1e-m1mmsqwmjdbdvv2k-z3djoj3ds287-ssfaek.mp3" length="92178725"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









Gabriele Scheler co-founded the Carl Correns Foundation for Mathematical Biology. Carl Correns was her great grandfather, one of the early pioneers in genetics. Gabriele is a computational neuroscientist, whose goal is to build models of cellular computation, and much of her focus is on neurons.



We discuss her theoretical work building a new kind of single neuron model. She, like Dmitri Chklovskii a few episodes ago, believes we've been stuck with essentially the same family of models for a neuron for a long time, despite minor variations on those models. The model Gabriele is working on, for example, respects the computations going on not only externally, via spiking, which has been the only game in town forever, but also the computations going on within the cell itself. Gabriele is in line with previous guests like Randy Gallistel, David Glanzman, and Hessam Akhlaghpour, who argue that we need to pay attention to how neurons are computing various things internally and how that affects our cognition. Gabriele also believes the new neuron model she's developing will improve AI, drastically simplifying the models by providing them with smarter neurons, essentially.



We also discuss the importance of neuromodulation, her interest in wanting to understand how we think via our internal verbal monologue, her lifelong interest in language in general, what she thinks about LLMs, why she decided to start her own foundation to fund her science, what that experience has been like so far. Gabriele has been working on these topics for many years, and as you'll hear in a moment, she was there when computational neuroscience was just starting to pop up in a few places, when it was a nascent field, unlike its current ubiquity in neuroscience.




Gabriele's website.



Carl Correns Foundation for Mathematical Biology.

Neuro-AI spinoff





Related papers

Sketch of a novel approach to a neural model.



Localist neural plasticity identified by mutual information.





Related episodes

BI 199 Hessam Akhlaghpour: Natural Universal Computation



BI 172 David Glanzman: Memory All The Way Down



BI 126 Randy Gallistel: Where Is the Engram?






0:00 - Intro
4:41 - Gabriele's early interests in verbal thinking
14:14 - What is thinking?
24:04 - Starting one's own foundation
58:18 - Building a new single neuron model
1:19:25 - The right level of abstraction
1:25:00 - How a new neuron would change AI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:35:08</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 207 Alison Preston: Schemas in our Brains and Minds]]>
                </title>
                <pubDate>Wed, 12 Mar 2025 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1991319</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-207-alison-preston-schemas-in-our-brains-and-minds</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>The concept of a schema goes back at least to the philosopher Immanuel Kant in the 1700s, who used the term to refer to a kind of built-in mental framework to organize sensory experience. But it was the psychologist Frederic Bartlett in the 1930s who used the term schema in a psychological sense, to explain how our memories are organized and how new information gets integrated into our memory. Fast forward another 100 years to today, and we have a podcast episode with my guest today, Alison Preston, who runs the Preston Lab at the University of Texas at Austin. On this episode, we discuss her <em>neuroscience</em> research explaining how our brains might carry out the processing that fits with our modern conception of schemas, and how our brains do that in different ways as we develop from childhood to adulthood.</p>



<p>I just said, "our modern conception of schemas," but like everything else, there isn't complete consensus among scientists exactly how to define schema. Ali has her own definition. She shares that, and how it differs from other conceptions commonly used. I like Ali's version and think it should be adopted, in part because it helps distinguish schemas from a related term, cognitive maps, which we've discussed aplenty on brain inspired, and can sometimes be used interchangeably with schemas. So we discuss how to think about schemas versus cognitive maps, versus concepts, versus semantic information, and so on.</p>



<p><a href="https://braininspired.co/podcast/206/">Last episode Ciara Greene</a> discussed schemas and how they underlie our memories, and learning, and predictions, and how they can lead to inaccurate memories and predictions. Today Ali explains how circuits in the brain might adaptively underlie this process as we develop, and how to go about measuring it in the first place.</p>



<ul class="wp-block-list">
<li><a href="https://preston.clm.utexas.edu/">Preston Lab</a></li>



<li>Twitter: <a href="https://twitter.com/preston_lab">@preston_lab</a></li>



<li>Related papers:
<ul class="wp-block-list">
<li><a href="https://clm.utexas.edu/preston/wp-content/uploads/2022/05/1-s2.0-S235215462030190X-main.pdf">Concept formation as a computational cognitive process</a>.</li>



<li><a href="https://preston.clm.utexas.edu/wp-content/uploads/2023/08/Concept-formation-as-a-computational-cognitive-process.pdf">Schema, Inference, and Memory</a>.</li>



<li><a href="https://clm.utexas.edu/preston/wp-content/uploads/2022/05/s41562-021-01206-5.pdf">Developmental differences in memory reactivation relate to encoding and inference in the human brain</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/03/BI-207-transcript-proof.pdf">transcript</a>.</p>



<p>0:00 - Intro
6:51 - Schemas
20:37 - Schemas and the developing brain
35:03 - Information theory, dimensionality, and detail
41:17 - Geometry of schemas
47:26 - Schemas and creativity
50:29 - Brain connection pruning with development
1:02:46 - Information i...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





The concept of a schema goes back at least to the philosopher Immanuel Kant in the 1700s, who used the term to refer to a kind of built-in mental framework to organize sensory experience. But it was the psychologist Frederic Bartlett in the 1930s who used the term schema in a psychological sense, to explain how our memories are organized and how new information gets integrated into our memory. Fast forward another 100 years to today, and we have a podcast episode with my guest today, Alison Preston, who runs the Preston Lab at the University of Texas at Austin. On this episode, we discuss her neuroscience research explaining how our brains might carry out the processing that fits with our modern conception of schemas, and how our brains do that in different ways as we develop from childhood to adulthood.



I just said, "our modern conception of schemas," but like everything else, there isn't complete consensus among scientists exactly how to define schema. Ali has her own definition. She shares that, and how it differs from other conceptions commonly used. I like Ali's version and think it should be adopted, in part because it helps distinguish schemas from a related term, cognitive maps, which we've discussed aplenty on brain inspired, and can sometimes be used interchangeably with schemas. So we discuss how to think about schemas versus cognitive maps, versus concepts, versus semantic information, and so on.



Last episode Ciara Greene discussed schemas and how they underlie our memories, and learning, and predictions, and how they can lead to inaccurate memories and predictions. Today Ali explains how circuits in the brain might adaptively underlie this process as we develop, and how to go about measuring it in the first place.




Preston Lab



Twitter: @preston_lab



Related papers:

Concept formation as a computational cognitive process.



Schema, Inference, and Memory.



Developmental differences in memory reactivation relate to encoding and inference in the human brain.






Read the transcript.



0:00 - Intro
6:51 - Schemas
20:37 - Schemas and the developing brain
35:03 - Information theory, dimensionality, and detail
41:17 - Geometry of schemas
47:26 - Schemas and creativity
50:29 - Brain connection pruning with development
1:02:46 - Information i...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 207 Alison Preston: Schemas in our Brains and Minds]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>





<p>The concept of a schema goes back at least to the philosopher Immanuel Kant in the 1700s, who used the term to refer to a kind of built-in mental framework to organize sensory experience. But it was the psychologist Frederic Bartlett in the 1930s who used the term schema in a psychological sense, to explain how our memories are organized and how new information gets integrated into our memory. Fast forward another 100 years to today, and we have a podcast episode with my guest today, Alison Preston, who runs the Preston Lab at the University of Texas at Austin. On this episode, we discuss her <em>neuroscience</em> research explaining how our brains might carry out the processing that fits with our modern conception of schemas, and how our brains do that in different ways as we develop from childhood to adulthood.</p>



<p>I just said, "our modern conception of schemas," but like everything else, there isn't complete consensus among scientists exactly how to define schema. Ali has her own definition. She shares that, and how it differs from other conceptions commonly used. I like Ali's version and think it should be adopted, in part because it helps distinguish schemas from a related term, cognitive maps, which we've discussed aplenty on brain inspired, and can sometimes be used interchangeably with schemas. So we discuss how to think about schemas versus cognitive maps, versus concepts, versus semantic information, and so on.</p>



<p><a href="https://braininspired.co/podcast/206/">Last episode Ciara Greene</a> discussed schemas and how they underlie our memories, and learning, and predictions, and how they can lead to inaccurate memories and predictions. Today Ali explains how circuits in the brain might adaptively underlie this process as we develop, and how to go about measuring it in the first place.</p>



<ul class="wp-block-list">
<li><a href="https://preston.clm.utexas.edu/">Preston Lab</a></li>



<li>Twitter: <a href="https://twitter.com/preston_lab">@preston_lab</a></li>



<li>Related papers:
<ul class="wp-block-list">
<li><a href="https://clm.utexas.edu/preston/wp-content/uploads/2022/05/1-s2.0-S235215462030190X-main.pdf">Concept formation as a computational cognitive process</a>.</li>



<li><a href="https://preston.clm.utexas.edu/wp-content/uploads/2023/08/Concept-formation-as-a-computational-cognitive-process.pdf">Schema, Inference, and Memory</a>.</li>



<li><a href="https://clm.utexas.edu/preston/wp-content/uploads/2022/05/s41562-021-01206-5.pdf">Developmental differences in memory reactivation relate to encoding and inference in the human brain</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/03/BI-207-transcript-proof.pdf">transcript</a>.</p>



<p>0:00 - Intro
6:51 - Schemas
20:37 - Schemas and the developing brain
35:03 - Information theory, dimensionality, and detail
41:17 - Geometry of schemas
47:26 - Schemas and creativity
50:29 - Brain connection pruning with development
1:02:46 - Information in brains
1:09:20 - Schemas and development in AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1991319/c1e-1dmmf57936hwpo8p-6z1g206mt31m-amiv13.mp3" length="87126321"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.





The concept of a schema goes back at least to the philosopher Immanuel Kant in the 1700s, who used the term to refer to a kind of built-in mental framework to organize sensory experience. But it was the psychologist Frederic Bartlett in the 1930s who used the term schema in a psychological sense, to explain how our memories are organized and how new information gets integrated into our memory. Fast forward another 100 years to today, and we have a podcast episode with my guest today, Alison Preston, who runs the Preston Lab at the University of Texas at Austin. On this episode, we discuss her neuroscience research explaining how our brains might carry out the processing that fits with our modern conception of schemas, and how our brains do that in different ways as we develop from childhood to adulthood.



I just said, "our modern conception of schemas," but like everything else, there isn't complete consensus among scientists exactly how to define schema. Ali has her own definition. She shares that, and how it differs from other conceptions commonly used. I like Ali's version and think it should be adopted, in part because it helps distinguish schemas from a related term, cognitive maps, which we've discussed aplenty on brain inspired, and can sometimes be used interchangeably with schemas. So we discuss how to think about schemas versus cognitive maps, versus concepts, versus semantic information, and so on.



Last episode Ciara Greene discussed schemas and how they underlie our memories, and learning, and predictions, and how they can lead to inaccurate memories and predictions. Today Ali explains how circuits in the brain might adaptively underlie this process as we develop, and how to go about measuring it in the first place.




Preston Lab



Twitter: @preston_lab



Related papers:

Concept formation as a computational cognitive process.



Schema, Inference, and Memory.



Developmental differences in memory reactivation relate to encoding and inference in the human brain.






Read the transcript.



0:00 - Intro
6:51 - Schemas
20:37 - Schemas and the developing brain
35:03 - Information theory, dimensionality, and detail
41:17 - Geometry of schemas
47:26 - Schemas and creativity
50:29 - Brain connection pruning with development
1:02:46 - Information i...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:29:47</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[Quick Announcement: Complexity Group]]>
                </title>
                <pubDate>Wed, 05 Mar 2025 00:08:34 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1986095</guid>
                                    <link>https://brain-inspired.castos.com/episodes/quick-announcement-complexity-group</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong>Here's the link to learn more and sign up:</strong></p>



<p class="has-text-align-center"><strong><a href="https://braininspired.co/complexity-group-email/">Complexity Group Email</a> List.</strong></p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Here's the link to learn more and sign up:



Complexity Group Email List.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[Quick Announcement: Complexity Group]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong>Here's the link to learn more and sign up:</strong></p>



<p class="has-text-align-center"><strong><a href="https://braininspired.co/complexity-group-email/">Complexity Group Email</a> List.</strong></p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1986095/c1e-3g11t59739sw29gm-kpw6x2pmark4-kias3g.mp3" length="13213036"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Here's the link to learn more and sign up:



Complexity Group Email List.]]>
                </itunes:summary>
                                                                            <itunes:duration>00:06:47</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 206 Ciara Greene: Memories Are Useful, Not Accurate]]>
                </title>
                <pubDate>Wed, 26 Feb 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1982066</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-206-ciara-greene-memories-are-useful-not-accurate</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Ciara Greene is Associate Professor in the University College Dublin School of Psychology. In this episode we discuss Ciara's book <a href="https://amzn.to/42l4msv">Memory Lane: The Perfectly Imperfect Ways We Remember</a>, co-authored by her colleague Gillian Murphy. The book is all about how human episodic memory works and why it works the way it does. Contrary to our common assumption, a "good memory" isn't necessarily highly accurate - we don't store memories like files in a filing cabinet. Instead our memories evolved to help us function in the world. That means our memories are flexible, constantly changing, and that forgetting can be beneficial, for example.</p>



<p>Regarding how our memories work, we discuss how memories are reconstructed each time we access them, and the role of schemas in organizing our episodic memories within the context of our previous experiences. Because our memories evolved for function and not accuracy, there's a wide range of flexibility in how we process and store memories. We're all susceptible to misinformation, all our memories are affected by our emotional states, and so on. Ciara's research explores many of the ways our memories are shaped by these various conditions, and how we should better understand our own and other's memories.</p>



<ul class="wp-block-list">
<li><a href="https://ucdattentionmemory.com/">Attention and Memory Lab</a></li>



<li>Twitter: <a href="https://x.com/ciaragreene01">@ciaragreene01</a>.</li>



<li>Book: <a href="https://amzn.to/42l4msv">Memory Lane: The Perfectly Imperfect Ways We Remember</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/02/BI-206-transcript-final.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
5:35 - The function of memory
6:41 - Reconstructive nature of memory
13:50 - Memory schemas, highly superior autobiographical memory
20:49 - Misremembering and flashbulb memories
27:52 - Forgetting and schemas
36:06 - What is a "good" memory?
39:35 - Memories and intention
43:47 - Memory and context
49:55 - Implanting false memories
1:04:10 - Memory suggestion during interrogations
1:06:30 - Memory, imagination, and creativity
1:13:45 - Artificial intelligence and memory
1:21:21 - Driven by questions</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.











Ciara Greene is Associate Professor in the University College Dublin School of Psychology. In this episode we discuss Ciara's book Memory Lane: The Perfectly Imperfect Ways We Remember, co-authored by her colleague Gillian Murphy. The book is all about how human episodic memory works and why it works the way it does. Contrary to our common assumption, a "good memory" isn't necessarily highly accurate - we don't store memories like files in a filing cabinet. Instead our memories evolved to help us function in the world. That means our memories are flexible, constantly changing, and that forgetting can be beneficial, for example.



Regarding how our memories work, we discuss how memories are reconstructed each time we access them, and the role of schemas in organizing our episodic memories within the context of our previous experiences. Because our memories evolved for function and not accuracy, there's a wide range of flexibility in how we process and store memories. We're all susceptible to misinformation, all our memories are affected by our emotional states, and so on. Ciara's research explores many of the ways our memories are shaped by these various conditions, and how we should better understand our own and other's memories.




Attention and Memory Lab



Twitter: @ciaragreene01.



Book: Memory Lane: The Perfectly Imperfect Ways We Remember




Read the transcript.



0:00 - Intro
5:35 - The function of memory
6:41 - Reconstructive nature of memory
13:50 - Memory schemas, highly superior autobiographical memory
20:49 - Misremembering and flashbulb memories
27:52 - Forgetting and schemas
36:06 - What is a "good" memory?
39:35 - Memories and intention
43:47 - Memory and context
49:55 - Implanting false memories
1:04:10 - Memory suggestion during interrogations
1:06:30 - Memory, imagination, and creativity
1:13:45 - Artificial intelligence and memory
1:21:21 - Driven by questions]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 206 Ciara Greene: Memories Are Useful, Not Accurate]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Ciara Greene is Associate Professor in the University College Dublin School of Psychology. In this episode we discuss Ciara's book <a href="https://amzn.to/42l4msv">Memory Lane: The Perfectly Imperfect Ways We Remember</a>, co-authored by her colleague Gillian Murphy. The book is all about how human episodic memory works and why it works the way it does. Contrary to our common assumption, a "good memory" isn't necessarily highly accurate - we don't store memories like files in a filing cabinet. Instead our memories evolved to help us function in the world. That means our memories are flexible, constantly changing, and that forgetting can be beneficial, for example.</p>



<p>Regarding how our memories work, we discuss how memories are reconstructed each time we access them, and the role of schemas in organizing our episodic memories within the context of our previous experiences. Because our memories evolved for function and not accuracy, there's a wide range of flexibility in how we process and store memories. We're all susceptible to misinformation, all our memories are affected by our emotional states, and so on. Ciara's research explores many of the ways our memories are shaped by these various conditions, and how we should better understand our own and other's memories.</p>



<ul class="wp-block-list">
<li><a href="https://ucdattentionmemory.com/">Attention and Memory Lab</a></li>



<li>Twitter: <a href="https://x.com/ciaragreene01">@ciaragreene01</a>.</li>



<li>Book: <a href="https://amzn.to/42l4msv">Memory Lane: The Perfectly Imperfect Ways We Remember</a></li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/02/BI-206-transcript-final.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
5:35 - The function of memory
6:41 - Reconstructive nature of memory
13:50 - Memory schemas, highly superior autobiographical memory
20:49 - Misremembering and flashbulb memories
27:52 - Forgetting and schemas
36:06 - What is a "good" memory?
39:35 - Memories and intention
43:47 - Memory and context
49:55 - Implanting false memories
1:04:10 - Memory suggestion during interrogations
1:06:30 - Memory, imagination, and creativity
1:13:45 - Artificial intelligence and memory
1:21:21 - Driven by questions</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1982066/c1e-pjzza57o88c1dxpr-okwqmpp4u2nq-spdzat.mp3" length="86934394"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.











Ciara Greene is Associate Professor in the University College Dublin School of Psychology. In this episode we discuss Ciara's book Memory Lane: The Perfectly Imperfect Ways We Remember, co-authored by her colleague Gillian Murphy. The book is all about how human episodic memory works and why it works the way it does. Contrary to our common assumption, a "good memory" isn't necessarily highly accurate - we don't store memories like files in a filing cabinet. Instead our memories evolved to help us function in the world. That means our memories are flexible, constantly changing, and that forgetting can be beneficial, for example.



Regarding how our memories work, we discuss how memories are reconstructed each time we access them, and the role of schemas in organizing our episodic memories within the context of our previous experiences. Because our memories evolved for function and not accuracy, there's a wide range of flexibility in how we process and store memories. We're all susceptible to misinformation, all our memories are affected by our emotional states, and so on. Ciara's research explores many of the ways our memories are shaped by these various conditions, and how we should better understand our own and other's memories.




Attention and Memory Lab



Twitter: @ciaragreene01.



Book: Memory Lane: The Perfectly Imperfect Ways We Remember




Read the transcript.



0:00 - Intro
5:35 - The function of memory
6:41 - Reconstructive nature of memory
13:50 - Memory schemas, highly superior autobiographical memory
20:49 - Misremembering and flashbulb memories
27:52 - Forgetting and schemas
36:06 - What is a "good" memory?
39:35 - Memories and intention
43:47 - Memory and context
49:55 - Implanting false memories
1:04:10 - Memory suggestion during interrogations
1:06:30 - Memory, imagination, and creativity
1:13:45 - Artificial intelligence and memory
1:21:21 - Driven by questions]]>
                </itunes:summary>
                                                                            <itunes:duration>01:29:10</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 205 Dmitri Chklovskii: Neurons Are Smarter Than You Think]]>
                </title>
                <pubDate>Wed, 12 Feb 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1971072</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-205-dmitri-chklovskii-neurons-are-smarter-than-you-think-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the “Brain Inspired” <a href="https://www.thetransmitter.org/newsletters/">email alerts</a> to be notified every time a new “Brain Inspired” episode is released: </p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Since the 1940s and 50s, back at the origins of what we now think of as artificial intelligence, there have been lots of ways of conceiving what it is that brains do, or what the function of the brain is. One of those conceptions, going back to cybernetics, is that the brain is a controller that operates under the principles of feedback control. This view has been carried down in various forms to us in present day. Also since that same time period, when McCulloch and Pitts suggested that single neurons are logical devices, there have been lots of ways of conceiving what it is that single neurons do. Are they logical operators, do they each represent something special, are they trying to maximize efficiency, for example?</p>



<p>Dmitri Chklovskii, who goes by Mitya, runs the Neural Circuits and Algorithms lab at the Flatiron Institute. Mitya believes that single neurons themselves are each individual controllers. They're smart agents, each trying to predict their inputs, like in predictive processing, but also functioning as an optimal feedback controller. We talk about historical conceptions of the function of single neurons and how this differs, we talk about how to think of single neurons versus populations of neurons, some of the neuroscience findings that seem to support Mitya's account, the control algorithm that simplifies the neuron's otherwise impossible control task, and other various topics.</p>



<p>We also discuss Mitya's early interests, coming from a physics and engineering background, in how to wire up our brains efficiently, given the limited amount of space in our craniums. Obviously evolution produced its own solutions for this problem. This pursuit led Mitya to study the C. elegans worm, because its connectome was nearly complete- actually, Mitya and his team helped complete the connectome so he'd have the whole wiring diagram to study it. So we talk about that work, and what knowing the whole connectome of C. elegans has and has not taught us about how brains work.</p>



<ul class="wp-block-list">
<li><a href="https://neural-circuits-and-algorithms.github.io/">Chklovskii Lab</a>.</li>



<li>Twitter: <a href="https://x.com/chklovskii">@chklovskii</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.biorxiv.org/content/10.1101/2024.01.02.573843v1">The Neuron as a Direct Data-Driven Controller</a>.</li>



<li><a href="https://www.pnas.org/doi/10.1073/pnas.2117484120">Normative and mechanistic model of an adaptive circuit for efficient encoding and feature extraction</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/143/">BI 143 Rodolphe Sepulchre: Mixed Feedback Control</a></li>



<li><a href="https://braininspired.co/podcast/119/">BI 119 Henry Yin: The Crisis in Neuroscience</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/02/BI-205-transcript-production.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: 



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Since the 1940s and 50s, back at the origins of what we now think of as artificial intelligence, there have been lots of ways of conceiving what it is that brains do, or what the function of the brain is. One of those conceptions, going back to cybernetics, is that the brain is a controller that operates under the principles of feedback control. This view has been carried down in various forms to us in present day. Also since that same time period, when McCulloch and Pitts suggested that single neurons are logical devices, there have been lots of ways of conceiving what it is that single neurons do. Are they logical operators, do they each represent something special, are they trying to maximize efficiency, for example?



Dmitri Chklovskii, who goes by Mitya, runs the Neural Circuits and Algorithms lab at the Flatiron Institute. Mitya believes that single neurons themselves are each individual controllers. They're smart agents, each trying to predict their inputs, like in predictive processing, but also functioning as an optimal feedback controller. We talk about historical conceptions of the function of single neurons and how this differs, we talk about how to think of single neurons versus populations of neurons, some of the neuroscience findings that seem to support Mitya's account, the control algorithm that simplifies the neuron's otherwise impossible control task, and other various topics.



We also discuss Mitya's early interests, coming from a physics and engineering background, in how to wire up our brains efficiently, given the limited amount of space in our craniums. Obviously evolution produced its own solutions for this problem. This pursuit led Mitya to study the C. elegans worm, because its connectome was nearly complete- actually, Mitya and his team helped complete the connectome so he'd have the whole wiring diagram to study it. So we talk about that work, and what knowing the whole connectome of C. elegans has and has not taught us about how brains work.




Chklovskii Lab.



Twitter: @chklovskii.



Related papers

The Neuron as a Direct Data-Driven Controller.



Normative and mechanistic model of an adaptive circuit for efficient encoding and feature extraction.





Related episodes

BI 143 Rodolphe Sepulchre: Mixed Feedback Control



BI 119 Henry Yin: The Crisis in Neuroscience






Read the transcript.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 205 Dmitri Chklovskii: Neurons Are Smarter Than You Think]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the “Brain Inspired” <a href="https://www.thetransmitter.org/newsletters/">email alerts</a> to be notified every time a new “Brain Inspired” episode is released: </p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Since the 1940s and 50s, back at the origins of what we now think of as artificial intelligence, there have been lots of ways of conceiving what it is that brains do, or what the function of the brain is. One of those conceptions, going back to cybernetics, is that the brain is a controller that operates under the principles of feedback control. This view has been carried down in various forms to us in present day. Also since that same time period, when McCulloch and Pitts suggested that single neurons are logical devices, there have been lots of ways of conceiving what it is that single neurons do. Are they logical operators, do they each represent something special, are they trying to maximize efficiency, for example?</p>



<p>Dmitri Chklovskii, who goes by Mitya, runs the Neural Circuits and Algorithms lab at the Flatiron Institute. Mitya believes that single neurons themselves are each individual controllers. They're smart agents, each trying to predict their inputs, like in predictive processing, but also functioning as an optimal feedback controller. We talk about historical conceptions of the function of single neurons and how this differs, we talk about how to think of single neurons versus populations of neurons, some of the neuroscience findings that seem to support Mitya's account, the control algorithm that simplifies the neuron's otherwise impossible control task, and other various topics.</p>



<p>We also discuss Mitya's early interests, coming from a physics and engineering background, in how to wire up our brains efficiently, given the limited amount of space in our craniums. Obviously evolution produced its own solutions for this problem. This pursuit led Mitya to study the C. elegans worm, because its connectome was nearly complete- actually, Mitya and his team helped complete the connectome so he'd have the whole wiring diagram to study it. So we talk about that work, and what knowing the whole connectome of C. elegans has and has not taught us about how brains work.</p>



<ul class="wp-block-list">
<li><a href="https://neural-circuits-and-algorithms.github.io/">Chklovskii Lab</a>.</li>



<li>Twitter: <a href="https://x.com/chklovskii">@chklovskii</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.biorxiv.org/content/10.1101/2024.01.02.573843v1">The Neuron as a Direct Data-Driven Controller</a>.</li>



<li><a href="https://www.pnas.org/doi/10.1073/pnas.2117484120">Normative and mechanistic model of an adaptive circuit for efficient encoding and feature extraction</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/143/">BI 143 Rodolphe Sepulchre: Mixed Feedback Control</a></li>



<li><a href="https://braininspired.co/podcast/119/">BI 119 Henry Yin: The Crisis in Neuroscience</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/02/BI-205-transcript-production.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
7:34 - Physicists approach for neuroscience
12:39 - What's missing in AI and neuroscience?
16:36 - Connectomes
31:51 - Understanding complex systems
33:17 - Earliest models of neurons
39:08 - Smart neurons
42:56 - Neuron theories that influenced Mitya
46:50 - Neuron as a controller
55:03 - How to test the neuron as controller hypothesis
1:00:29 - Direct data-driven control
1:11:09 - Experimental evidence
1:22:25 - Single neuron doctrine and population doctrine
1:25:30 - Neurons as agents
1:28:52 - Implications for AI
1:30:02 - Limits to control perspective</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1971072/c1e-z9xxcm0vwwsgkpmk-z3dww0vxb9wk-7oosep.mp3" length="96935517"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: 



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Since the 1940s and 50s, back at the origins of what we now think of as artificial intelligence, there have been lots of ways of conceiving what it is that brains do, or what the function of the brain is. One of those conceptions, going back to cybernetics, is that the brain is a controller that operates under the principles of feedback control. This view has been carried down in various forms to us in present day. Also since that same time period, when McCulloch and Pitts suggested that single neurons are logical devices, there have been lots of ways of conceiving what it is that single neurons do. Are they logical operators, do they each represent something special, are they trying to maximize efficiency, for example?



Dmitri Chklovskii, who goes by Mitya, runs the Neural Circuits and Algorithms lab at the Flatiron Institute. Mitya believes that single neurons themselves are each individual controllers. They're smart agents, each trying to predict their inputs, like in predictive processing, but also functioning as an optimal feedback controller. We talk about historical conceptions of the function of single neurons and how this differs, we talk about how to think of single neurons versus populations of neurons, some of the neuroscience findings that seem to support Mitya's account, the control algorithm that simplifies the neuron's otherwise impossible control task, and other various topics.



We also discuss Mitya's early interests, coming from a physics and engineering background, in how to wire up our brains efficiently, given the limited amount of space in our craniums. Obviously evolution produced its own solutions for this problem. This pursuit led Mitya to study the C. elegans worm, because its connectome was nearly complete - actually, Mitya and his team helped complete the connectome so he'd have the whole wiring diagram to study it. So we talk about that work, and what knowing the whole connectome of C. elegans has and has not taught us about how brains work.




Chklovskii Lab.



Twitter: @chklovskii.



Related papers

The Neuron as a Direct Data-Driven Controller.



Normative and mechanistic model of an adaptive circuit for efficient encoding and feature extraction.





Related episodes

BI 143 Rodolphe Sepulchre: Mixed Feedback Control



BI 119 Henry Yin: The Crisis in Neuroscience






Read the ]]>
                </itunes:summary>
                                                                            <itunes:duration>01:39:05</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 204 David Robbe: Your Brain Doesn't Measure Time]]>
                </title>
                <pubDate>Wed, 29 Jan 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1953384</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-204-david-robbe-your-brain-doesnt-measure-time-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released: </p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>When you play hide and seek, as you do on a regular basis I'm sure, and you count to ten before shouting, "Ready or not, here I come," how do you keep track of time? Is it a clock in your brain, as many neuroscientists assume and therefore search for in their research? Or is it something else? Maybe the rhythm of your vocalization as you say, "one-one thousand, two-one thousand"? Even if you’re counting silently, could it be that you’re imagining the movements of speaking aloud and tracking those virtual actions? My guest today, neuroscientist David Robbe, believes we don't rely on clocks in our brains, or measure time internally, or really that we measure time at all. Rather, our estimation of time emerges through our interactions with the world around us and/or the world within us as we behave.</p>



<p>David is group leader of the Cortical-Basal Ganglia Circuits and Behavior Lab at the Institute of Mediterranean Neurobiology. His perspective on how organisms measure time is the result of his own behavioral experiments with rodents, and by revisiting one of his favorite philosophers, Henri Bergson. So in this episode, we discuss how all of this came about - how neuroscientists have long searched for brain activity that measures or keeps track of time in areas like the basal ganglia, which is the brain region David focuses on, how the rodents he studies behave in surprising ways when he asks them to estimate time intervals, and how Bergson introduced the world to the notion of durée, our lived experience and feeling of time.</p>



<ul class="wp-block-list">
<li><a href="https://www.inmed.fr/en/en-avenir-dynamiques-neuronales-et-fonctions-des-ganglions-de-la-base">Cortical-Basal Ganglia Circuits and Behavior Lab</a>.</li>



<li>Twitter: <a href="https://x.com/dav_robbe">@dav_robbe</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://amu.hal.science/hal-04225756/document">Lost in time: Relocating the perception of duration outside the brain</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/2024.05.31.596850v1">Running, Fast and Slow: The Dorsal Striatum Sets the Cost of Movement During Foraging</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:59 - Why behavior is so important in itself
10:27 - Henri Bergson
21:17 - Bergson's view of life
26:25 - A task to test how animals time things
34:08 - Back to Bergson and duree
39:44 - Externalizing time
44:11 - Internal representation of time
1:03:38 - Cognition as internal movement
1:09:14 - Free will
1:15:27 - Implications for AI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: 



To explore more neuroscience news and perspectives, visit thetransmitter.org.



When you play hide and seek, as you do on a regular basis I'm sure, and you count to ten before shouting, "Ready or not, here I come," how do you keep track of time? Is it a clock in your brain, as many neuroscientists assume and therefore search for in their research? Or is it something else? Maybe the rhythm of your vocalization as you say, "one-one thousand, two-one thousand"? Even if you’re counting silently, could it be that you’re imagining the movements of speaking aloud and tracking those virtual actions? My guest today, neuroscientist David Robbe, believes we don't rely on clocks in our brains, or measure time internally, or really that we measure time at all. Rather, our estimation of time emerges through our interactions with the world around us and/or the world within us as we behave.



David is group leader of the Cortical-Basal Ganglia Circuits and Behavior Lab at the Institute of Mediterranean Neurobiology. His perspective on how organisms measure time is the result of his own behavioral experiments with rodents, and by revisiting one of his favorite philosophers, Henri Bergson. So in this episode, we discuss how all of this came about - how neuroscientists have long searched for brain activity that measures or keeps track of time in areas like the basal ganglia, which is the brain region David focuses on, how the rodents he studies behave in surprising ways when he asks them to estimate time intervals, and how Bergson introduced the world to the notion of durée, our lived experience and feeling of time.




Cortical-Basal Ganglia Circuits and Behavior Lab.



Twitter: @dav_robbe



Related papers

Lost in time: Relocating the perception of duration outside the brain.



Running, Fast and Slow: The Dorsal Striatum Sets the Cost of Movement During Foraging.






0:00 - Intro
3:59 - Why behavior is so important in itself
10:27 - Henri Bergson
21:17 - Bergson's view of life
26:25 - A task to test how animals time things
34:08 - Back to Bergson and duree
39:44 - Externalizing time
44:11 - Internal representation of time
1:03:38 - Cognition as internal movement
1:09:14 - Free will
1:15:27 - Implications for AI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 204 David Robbe: Your Brain Doesn't Measure Time]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released: </p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>When you play hide and seek, as you do on a regular basis I'm sure, and you count to ten before shouting, "Ready or not, here I come," how do you keep track of time? Is it a clock in your brain, as many neuroscientists assume and therefore search for in their research? Or is it something else? Maybe the rhythm of your vocalization as you say, "one-one thousand, two-one thousand"? Even if you’re counting silently, could it be that you’re imagining the movements of speaking aloud and tracking those virtual actions? My guest today, neuroscientist David Robbe, believes we don't rely on clocks in our brains, or measure time internally, or really that we measure time at all. Rather, our estimation of time emerges through our interactions with the world around us and/or the world within us as we behave.</p>



<p>David is group leader of the Cortical-Basal Ganglia Circuits and Behavior Lab at the Institute of Mediterranean Neurobiology. His perspective on how organisms measure time is the result of his own behavioral experiments with rodents, and by revisiting one of his favorite philosophers, Henri Bergson. So in this episode, we discuss how all of this came about - how neuroscientists have long searched for brain activity that measures or keeps track of time in areas like the basal ganglia, which is the brain region David focuses on, how the rodents he studies behave in surprising ways when he asks them to estimate time intervals, and how Bergson introduced the world to the notion of durée, our lived experience and feeling of time.</p>



<ul class="wp-block-list">
<li><a href="https://www.inmed.fr/en/en-avenir-dynamiques-neuronales-et-fonctions-des-ganglions-de-la-base">Cortical-Basal Ganglia Circuits and Behavior Lab</a>.</li>



<li>Twitter: <a href="https://x.com/dav_robbe">@dav_robbe</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://amu.hal.science/hal-04225756/document">Lost in time: Relocating the perception of duration outside the brain</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/2024.05.31.596850v1">Running, Fast and Slow: The Dorsal Striatum Sets the Cost of Movement During Foraging</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:59 - Why behavior is so important in itself
10:27 - Henri Bergson
21:17 - Bergson's view of life
26:25 - A task to test how animals time things
34:08 - Back to Bergson and duree
39:44 - Externalizing time
44:11 - Internal representation of time
1:03:38 - Cognition as internal movement
1:09:14 - Free will
1:15:27 - Implications for AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1953384/c1e-7kpph4jvv5a3z4kq-7z2rd0ovu3pd-4bsubz.mp3" length="94690750"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: 



To explore more neuroscience news and perspectives, visit thetransmitter.org.



When you play hide and seek, as you do on a regular basis I'm sure, and you count to ten before shouting, "Ready or not, here I come," how do you keep track of time? Is it a clock in your brain, as many neuroscientists assume and therefore search for in their research? Or is it something else? Maybe the rhythm of your vocalization as you say, "one-one thousand, two-one thousand"? Even if you’re counting silently, could it be that you’re imagining the movements of speaking aloud and tracking those virtual actions? My guest today, neuroscientist David Robbe, believes we don't rely on clocks in our brains, or measure time internally, or really that we measure time at all. Rather, our estimation of time emerges through our interactions with the world around us and/or the world within us as we behave.



David is group leader of the Cortical-Basal Ganglia Circuits and Behavior Lab at the Institute of Mediterranean Neurobiology. His perspective on how organisms measure time is the result of his own behavioral experiments with rodents, and by revisiting one of his favorite philosophers, Henri Bergson. So in this episode, we discuss how all of this came about - how neuroscientists have long searched for brain activity that measures or keeps track of time in areas like the basal ganglia, which is the brain region David focuses on, how the rodents he studies behave in surprising ways when he asks them to estimate time intervals, and how Bergson introduced the world to the notion of durée, our lived experience and feeling of time.




Cortical-Basal Ganglia Circuits and Behavior Lab.



Twitter: @dav_robbe



Related papers

Lost in time: Relocating the perception of duration outside the brain.



Running, Fast and Slow: The Dorsal Striatum Sets the Cost of Movement During Foraging.






0:00 - Intro
3:59 - Why behavior is so important in itself
10:27 - Henri Bergson
21:17 - Bergson's view of life
26:25 - A task to test how animals time things
34:08 - Back to Bergson and duree
39:44 - Externalizing time
44:11 - Internal representation of time
1:03:38 - Cognition as internal movement
1:09:14 - Free will
1:15:27 - Implications for AI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:37:37</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 203 David Krakauer: How To Think Like a Complexity Scientist]]>
                </title>
                <pubDate>Tue, 14 Jan 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1940145</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-203-david-krakauer-how-to-think-like-a-complexity-scientist-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p><a href="https://www.thetransmitter.org/">The Transmitter</a> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>David Krakauer is the president of the Santa Fe Institute, where their mission is officially "Searching for Order in the Complexity of Evolving Worlds." When I think of the Santa Fe Institute, I think of complexity science, because that is the common thread across the many subjects people study at SFI, like societies, economies, brains, machines, and evolution. David has been on before, and I invited him back to discuss some of the topics in his new book <a href="https://www.sfipress.org/books/the-complex-world">The Complex World: An Introduction to the Fundamentals of Complexity Science</a>.</p>



<p>The book on the one hand serves as an introduction and a guide to a 4 volume collection of foundational papers in complexity science, which you'll hear David discuss in a moment. On the other hand, The Complex World became much more, discussing and connecting ideas across the history of complexity science. Where did complexity science come from? How does it fit among other scientific paradigms? How did the breakthroughs come about? Along the way, we discuss the four pillars of complexity science - entropy, evolution, dynamics, and computation, and how complexity scientists draw from these four areas to study what David calls "problem-solving matter." We discuss emergence, the role of time scales, and plenty more all with my own self-serving goal to learn and practice how to think like a complexity scientist to improve my own work on how brains do things. Hopefully our conversation, and David's book, help you do the same.</p>





<ul class="wp-block-list">
<li><a href="https://davidckrakauer.com/">David's website</a>.</li>



<li><a href="https://www.santafe.edu/people/profile/david-krakauer">David's SFI homepage</a>.</li>



<li>The book: <a href="https://www.sfipress.org/books/the-complex-world">The Complex World: An Introduction to the Fundamentals of Complexity Science</a>.</li>



<li>The 4-Volume Series: <a href="https://www.sfipress.org/books/foundational-papers-in-complexity-science">Foundational Papers in Complexity Science.</a></li>



<li>Mentioned:
<ul class="wp-block-list">
<li>Aeon article: <a href="https://aeon.co/essays/is-life-a-complex-computational-process">Problem-solving matter</a>.</li>



<li><a href="https://link.springer.com/article/10.1007/s12064-020-00313-7">The information theory of individuality</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/01/BI-203-transcript-proof.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:45 - Origins of The Complex World
20:10 - 4 pillars of complexity
36:27 - 40s to 70s in complexity
42:33 - How to proceed as a complexity scientist
54:32 - Broken symmetries
1:02:40 - Emergence
1:13:25 - Time scales and complexity
1:18:48 - Consensus and how ideas migrate
1:29:25 - Disciplinary matrix (Kuhn)
1:32:45 - Intelligence vs. life</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.











The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



David Krakauer is the president of the Santa Fe Institute, where their mission is officially "Searching for Order in the Complexity of Evolving Worlds." When I think of the Santa Fe Institute, I think of complexity science, because that is the common thread across the many subjects people study at SFI, like societies, economies, brains, machines, and evolution. David has been on before, and I invited him back to discuss some of the topics in his new book The Complex World: An Introduction to the Fundamentals of Complexity Science.



The book on the one hand serves as an introduction and a guide to a 4 volume collection of foundational papers in complexity science, which you'll hear David discuss in a moment. On the other hand, The Complex World became much more, discussing and connecting ideas across the history of complexity science. Where did complexity science come from? How does it fit among other scientific paradigms? How did the breakthroughs come about? Along the way, we discuss the four pillars of complexity science - entropy, evolution, dynamics, and computation, and how complexity scientists draw from these four areas to study what David calls "problem-solving matter." We discuss emergence, the role of time scales, and plenty more all with my own self-serving goal to learn and practice how to think like a complexity scientist to improve my own work on how brains do things. Hopefully our conversation, and David's book, help you do the same.






David's website.



David's SFI homepage.



The book: The Complex World: An Introduction to the Fundamentals of Complexity Science.



The 4-Volume Series: Foundational Papers in Complexity Science.



Mentioned:

Aeon article: Problem-solving matter.



The information theory of individuality.






Read the transcript.



0:00 - Intro
3:45 - Origins of The Complex World
20:10 - 4 pillars of complexity
36:27 - 40s to 70s in complexity
42:33 - How to proceed as a complexity scientist
54:32 - Broken symmetries
1:02:40 - Emergence
1:13:25 - Time scales and complexity
1:18:48 - Consensus and how ideas migrate
1:29:25 - Disciplinary matrix (Kuhn)
1:32:45 - Intelligence vs. life]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 203 David Krakauer: How To Think Like a Complexity Scientist]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p><a href="https://www.thetransmitter.org/">The Transmitter</a> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>David Krakauer is the president of the Santa Fe Institute, where their mission is officially "Searching for Order in the Complexity of Evolving Worlds." When I think of the Santa Fe Institute, I think of complexity science, because that is the common thread across the many subjects people study at SFI, like societies, economies, brains, machines, and evolution. David has been on before, and I invited him back to discuss some of the topics in his new book <a href="https://www.sfipress.org/books/the-complex-world">The Complex World: An Introduction to the Fundamentals of Complexity Science</a>.</p>



<p>The book on the one hand serves as an introduction and a guide to a 4 volume collection of foundational papers in complexity science, which you'll hear David discuss in a moment. On the other hand, The Complex World became much more, discussing and connecting ideas across the history of complexity science. Where did complexity science come from? How does it fit among other scientific paradigms? How did the breakthroughs come about? Along the way, we discuss the four pillars of complexity science - entropy, evolution, dynamics, and computation, and how complexity scientists draw from these four areas to study what David calls "problem-solving matter." We discuss emergence, the role of time scales, and plenty more all with my own self-serving goal to learn and practice how to think like a complexity scientist to improve my own work on how brains do things. Hopefully our conversation, and David's book, help you do the same.</p>





<ul class="wp-block-list">
<li><a href="https://davidckrakauer.com/">David's website</a>.</li>



<li><a href="https://www.santafe.edu/people/profile/david-krakauer">David's SFI homepage</a>.</li>



<li>The book: <a href="https://www.sfipress.org/books/the-complex-world">The Complex World: An Introduction to the Fundamentals of Complexity Science</a>.</li>



<li>The 4-Volume Series: <a href="https://www.sfipress.org/books/foundational-papers-in-complexity-science">Foundational Papers in Complexity Science.</a></li>



<li>Mentioned:
<ul class="wp-block-list">
<li>Aeon article: <a href="https://aeon.co/essays/is-life-a-complex-computational-process">Problem-solving matter</a>.</li>



<li><a href="https://link.springer.com/article/10.1007/s12064-020-00313-7">The information theory of individuality</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2025/01/BI-203-transcript-proof.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:45 - Origins of The Complex World
20:10 - 4 pillars of complexity
36:27 - 40s to 70s in complexity
42:33 - How to proceed as a complexity scientist
54:32 - Broken symmetries
1:02:40 - Emergence
1:13:25 - Time scales and complexity
1:18:48 - Consensus and how ideas migrate
1:29:25 - Disciplinary matrix (Kuhn)
1:32:45 - Intelligence vs. life</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1940145/c1e-6wvvc24q7nuj27q3-pkgg1rj1hdd6-ldluen.mp3" length="102990617"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.











The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



David Krakauer is the president of the Santa Fe Institute, where their mission is officially "Searching for Order in the Complexity of Evolving Worlds." When I think of the Santa Fe Institute, I think of complexity science, because that is the common thread across the many subjects people study at SFI, like societies, economies, brains, machines, and evolution. David has been on before, and I invited him back to discuss some of the topics in his new book The Complex World: An Introduction to the Fundamentals of Complexity Science.



The book on the one hand serves as an introduction and a guide to a 4 volume collection of foundational papers in complexity science, which you'll hear David discuss in a moment. On the other hand, The Complex World became much more, discussing and connecting ideas across the history of complexity science. Where did complexity science come from? How does it fit among other scientific paradigms? How did the breakthroughs come about? Along the way, we discuss the four pillars of complexity science - entropy, evolution, dynamics, and computation, and how complexity scientists draw from these four areas to study what David calls "problem-solving matter." We discuss emergence, the role of time scales, and plenty more all with my own self-serving goal to learn and practice how to think like a complexity scientist to improve my own work on how brains do things. Hopefully our conversation, and David's book, help you do the same.






David's website.



David's SFI homepage.



The book: The Complex World: An Introduction to the Fundamentals of Complexity Science.



The 4-Volume Series: Foundational Papers in Complexity Science.



Mentioned:

Aeon article: Problem-solving matter.



The information theory of individuality.






Read the transcript.



0:00 - Intro
3:45 - Origins of The Complex World
20:10 - 4 pillars of complexity
36:27 - 40s to 70s in complexity
42:33 - How to proceed as a complexity scientist
54:32 - Broken symmetries
1:02:40 - Emergence
1:13:25 - Time scales and complexity
1:18:48 - Consensus and how ideas migrate
1:29:25 - Disciplinary matrix (Kuhn)
1:32:45 - Intelligence vs. life]]>
                </itunes:summary>
                                                                            <itunes:duration>01:46:03</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 202 Eli Sennesh: Divide-and-Conquer to Predict]]>
                </title>
                <pubDate>Fri, 03 Jan 2025 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1933016</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-202-eli-sennesh-divide-and-conquer-to-predict</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em><a href="http://thetransmitter.org/">The Transmitter</a></em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new Brain Inspired episode is released<a href="https://www.thetransmitter.org/newsletters/">.</a></p>









<p>Eli Sennesh is a postdoc at Vanderbilt University, one of my old stomping grounds, currently in the lab of Andre Bastos. Andre’s lab focuses on understanding brain dynamics within cortical circuits, particularly how communication between brain areas is coordinated in perception, cognition, and behavior. So Eli is busy doing work along those lines, as you'll hear more about. But the original impetus for having him on was his recently published proposal for how predictive coding might be implemented in brains. So in that sense, this episode builds on the last episode with Rajesh Rao, where we discussed Raj's "active predictive coding" account of predictive coding.  As a super brief refresher, predictive coding is the proposal that the brain is constantly predicting what's about to happen, then stuff happens, and the brain uses the mismatch between its predictions and the actual stuff that's happening, to learn how to make better predictions moving forward. I refer you to the previous episode for more details. So Eli's account, along with his co-authors of course, which he calls "divide-and-conquer" predictive coding, uses a probabilistic approach in an attempt to account for how brains might implement predictive coding, and you'll learn more about that in our discussion. But we also talk quite a bit about the difference between practicing theoretical and experimental neuroscience, and Eli's experience moving into the experimental side from the theoretical side.</p>



<ul class="wp-block-list">
<li><a href="https://esennesh.github.io/">Eli's website</a>.</li>



<li><a href="https://www.bastoslabvu.com/">Bastos lab</a>.</li>



<li>Twitter: <a href="https://x.com/EliSennesh">@EliSennesh</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2408.05834">Divide-and-Conquer Predictive Coding: a Structured Bayesian Inference Algorithm</a>.</li>
</ul>
</li>



<li>Related episode:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/201/">BI 201 Rajesh Rao: Active Predictive Coding</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/12/BI-202-transcript-proof.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:59 - Eli's worldview
17:56 - NeuroAI is hard
24:38 - Prediction errors vs surprise
55:16 - Divide and conquer
1:13:24 - Challenges
1:18:44 - How to build AI
1:25:56 - Affect
1:31:55 - Abolish the value function</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new Brain Inspired episode is released.









Eli Sennesh is a postdoc at Vanderbilt University, one of my old stomping grounds, currently in the lab of Andre Bastos. Andre’s lab focuses on understanding brain dynamics within cortical circuits, particularly how communication between brain areas is coordinated in perception, cognition, and behavior. So Eli is busy doing work along those lines, as you'll hear more about. But the original impetus for having him on was his recently published proposal for how predictive coding might be implemented in brains. So in that sense, this episode builds on the last episode with Rajesh Rao, where we discussed Raj's "active predictive coding" account of predictive coding.  As a super brief refresher, predictive coding is the proposal that the brain is constantly predicting what's about to happen, then stuff happens, and the brain uses the mismatch between its predictions and the actual stuff that's happening, to learn how to make better predictions moving forward. I refer you to the previous episode for more details. So Eli's account, along with his co-authors of course, which he calls "divide-and-conquer" predictive coding, uses a probabilistic approach in an attempt to account for how brains might implement predictive coding, and you'll learn more about that in our discussion. But we also talk quite a bit about the difference between practicing theoretical and experimental neuroscience, and Eli's experience moving into the experimental side from the theoretical side.




Eli's website.



Bastos lab.



Twitter: @EliSennesh



Related papers

Divide-and-Conquer Predictive Coding: a Structured Bayesian Inference Algorithm.





Related episode:

BI 201 Rajesh Rao: Active Predictive Coding.






Read the transcript.



0:00 - Intro
3:59 - Eli's worldview
17:56 - NeuroAI is hard
24:38 - Prediction errors vs surprise
55:16 - Divide and conquer
1:13:24 - Challenges
1:18:44 - How to build AI
1:25:56 - Affect
1:31:55 - Abolish the value function]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 202 Eli Sennesh: Divide-and-Conquer to Predict]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em><a href="http://thetransmitter.org/">The Transmitter</a></em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new Brain Inspired episode is released<a href="https://www.thetransmitter.org/newsletters/">.</a></p>









<p>Eli Sennesh is a postdoc at Vanderbilt University, one of my old stomping grounds, currently in the lab of Andre Bastos. Andre’s lab focuses on understanding brain dynamics within cortical circuits, particularly how communication between brain areas is coordinated in perception, cognition, and behavior. So Eli is busy doing work along those lines, as you'll hear more about. But the original impetus for having him on was his recently published proposal for how predictive coding might be implemented in brains. So in that sense, this episode builds on the last episode with Rajesh Rao, where we discussed Raj's "active predictive coding" account of predictive coding.  As a super brief refresher, predictive coding is the proposal that the brain is constantly predicting what's about to happen, then stuff happens, and the brain uses the mismatch between its predictions and the actual stuff that's happening, to learn how to make better predictions moving forward. I refer you to the previous episode for more details. So Eli's account, along with his co-authors of course, which he calls "divide-and-conquer" predictive coding, uses a probabilistic approach in an attempt to account for how brains might implement predictive coding, and you'll learn more about that in our discussion. But we also talk quite a bit about the difference between practicing theoretical and experimental neuroscience, and Eli's experience moving into the experimental side from the theoretical side.</p>



<ul class="wp-block-list">
<li><a href="https://esennesh.github.io/">Eli's website</a>.</li>



<li><a href="https://www.bastoslabvu.com/">Bastos lab</a>.</li>



<li>Twitter: <a href="https://x.com/EliSennesh">@EliSennesh</a></li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/pdf/2408.05834">Divide-and-Conquer Predictive Coding: a Structured Bayesian Inference Algorithm</a>.</li>
</ul>
</li>



<li>Related episode:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/201/">BI 201 Rajesh Rao: Active Predictive Coding</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/12/BI-202-transcript-proof.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:59 - Eli's worldview
17:56 - NeuroAI is hard
24:38 - Prediction errors vs surprise
55:16 - Divide and conquer
1:13:24 - Challenges
1:18:44 - How to build AI
1:25:56 - Affect
1:31:55 - Abolish the value function</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1933016/c1e-2k22h8225kc8r98r-6zwkp27vfqnw-5rr1qw.mp3" length="95173640"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new Brain Inspired episode is released.









Eli Sennesh is a postdoc at Vanderbilt University, one of my old stomping grounds, currently in the lab of Andre Bastos. Andre’s lab focuses on understanding brain dynamics within cortical circuits, particularly how communication between brain areas is coordinated in perception, cognition, and behavior. So Eli is busy doing work along those lines, as you'll hear more about. But the original impetus for having him on was his recently published proposal for how predictive coding might be implemented in brains. So in that sense, this episode builds on the last episode with Rajesh Rao, where we discussed Raj's "active predictive coding" account of predictive coding.  As a super brief refresher, predictive coding is the proposal that the brain is constantly predicting what's about to happen, then stuff happens, and the brain uses the mismatch between its predictions and the actual stuff that's happening, to learn how to make better predictions moving forward. I refer you to the previous episode for more details. So Eli's account, along with his co-authors of course, which he calls "divide-and-conquer" predictive coding, uses a probabilistic approach in an attempt to account for how brains might implement predictive coding, and you'll learn more about that in our discussion. But we also talk quite a bit about the difference between practicing theoretical and experimental neuroscience, and Eli's experience moving into the experimental side from the theoretical side.




Eli's website.



Bastos lab.



Twitter: @EliSennesh



Related papers

Divide-and-Conquer Predictive Coding: a Structured Bayesian Inference Algorithm.





Related episode:

BI 201 Rajesh Rao: Active Predictive Coding.






Read the transcript.



0:00 - Intro
3:59 - Eli's worldview
17:56 - NeuroAI is hard
24:38 - Prediction errors vs surprise
55:16 - Divide and conquer
1:13:24 - Challenges
1:18:44 - How to build AI
1:25:56 - Affect
1:31:55 - Abolish the value function]]>
                </itunes:summary>
                                                                            <itunes:duration>01:38:11</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 201 Rajesh Rao: From Predictive Coding to Brain Co-Processors]]>
                </title>
                <pubDate>Wed, 18 Dec 2024 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1922841</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-201-rajesh-rao-from-predictive-coding-to-brain-co-processors-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Today I'm in conversation with Rajesh Rao, a distinguished professor of computer science and engineering at the University of Washington, where he also co-directs the Center for Neurotechnology. Back in 1999, Raj and Dana Ballard published what became <a href="https://pubmed.ncbi.nlm.nih.gov/10195184/">quite a famous paper</a>, which proposed how predictive coding might be implemented in brains. What is predictive coding, you may be wondering? It's roughly the idea that your brain is constantly predicting incoming sensory signals, and it generates that prediction as a top-down signal that meets the bottom-up sensory signals. Then the brain computes a difference between the prediction and the actual sensory input, and that difference is sent back up to the "top" where the brain then updates its internal model to make better future predictions.</p>



<p>So that was 25 years ago, and it was focused on how the brain handles sensory information. But Raj just recently published an update to the predictive coding framework, one that incorporates actions and perception, suggests how it might be implemented in the cortex - specifically which cortical layers do what - something he calls "Active predictive coding." So we discuss that new proposal, we also talk about his engineering work on brain-computer interface technologies, like BrainNet, which basically connects two brains together, and like neural co-processors, which use an artificial neural network as a prosthetic that can do things like enhance memories, optimize learning, and help restore brain function after strokes, for example. Finally, we discuss Raj's interest and work on deciphering an ancient Indian text, the mysterious Indus script.</p>



<ul class="wp-block-list">
<li><a href="https://www.rajeshpnrao.com/">Raj's website</a>.</li>



<li>Twitter: <a href="https://x.com/RajeshPNRao">@RajeshPNRao</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.nature.com/articles/s41593-024-01673-9">A sensory–motor theory of the neocortex</a>.</li>



<li><a href="https://arxiv.org/pdf/2012.03378">Brain co-processors: using AI to restore and augment brain function</a>.</li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438818301843">Towards neural co-processors for the brain: combining decoding and encoding in brain–computer interfaces</a>.</li>



<li><a href="https://www.nature.com/articles/s41598-019-41895-7">BrainNet: A Multi-Person Brain-to-Brain Interface for Direct Collaboration Between Brains</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/12/BI-201-transcript.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
7:40 - Predictive coding origins
16:14 - Early appreciation of recurrence
17:08 - Prediction as a general theory of the brain
18:38 - Rao and Ballard 1999
26:32 - Prediction as a general theory of the brain
33:24 - Perception vs action
33:28 - Active predictive coding
45:04 - Evolving to augment our brains
53:03 - BrainNet
57:12 - Neural co-processors
1:11:19 - Decoding the Indus Script
1:20:18 - Transformer models relation to active predictive coding</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.











Today I'm in conversation with Rajesh Rao, a distinguished professor of computer science and engineering at the University of Washington, where he also co-directs the Center for Neurotechnology. Back in 1999, Raj and Dana Ballard published what became quite a famous paper, which proposed how predictive coding might be implemented in brains. What is predictive coding, you may be wondering? It's roughly the idea that your brain is constantly predicting incoming sensory signals, and it generates that prediction as a top-down signal that meets the bottom-up sensory signals. Then the brain computes a difference between the prediction and the actual sensory input, and that difference is sent back up to the "top" where the brain then updates its internal model to make better future predictions.



So that was 25 years ago, and it was focused on how the brain handles sensory information. But Raj just recently published an update to the predictive coding framework, one that incorporates actions and perception, suggests how it might be implemented in the cortex - specifically which cortical layers do what - something he calls "Active predictive coding." So we discuss that new proposal, we also talk about his engineering work on brain-computer interface technologies, like BrainNet, which basically connects two brains together, and like neural co-processors, which use an artificial neural network as a prosthetic that can do things like enhance memories, optimize learning, and help restore brain function after strokes, for example. Finally, we discuss Raj's interest and work on deciphering an ancient Indian text, the mysterious Indus script.




Raj's website.



Twitter: @RajeshPNRao.



Related papers

A sensory–motor theory of the neocortex.



Brain co-processors: using AI to restore and augment brain function.



Towards neural co-processors for the brain: combining decoding and encoding in brain–computer interfaces.



BrainNet: A Multi-Person Brain-to-Brain Interface for Direct Collaboration Between Brains.






Read the transcript.



0:00 - Intro
7:40 - Predictive coding origins
16:14 - Early appreciation of recurrence
17:08 - Prediction as a general theory of the brain
18:38 - Rao and Ballard 1999
26:32 - Prediction as a general theory of the brain
33:24 - Perception vs action
33:28 - Active predictive coding
45:04 - Evolving to augment our brains
53:03 - BrainNet
57:12 - Neural co-processors
1:11:19 - Decoding the Indus Script
1:20:18 - Transformer models relation to active predictive coding]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 201 Rajesh Rao: From Predictive Coding to Brain Co-Processors]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes, full archive, and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Today I'm in conversation with Rajesh Rao, a distinguished professor of computer science and engineering at the University of Washington, where he also co-directs the Center for Neurotechnology. Back in 1999, Raj and Dana Ballard published what became <a href="https://pubmed.ncbi.nlm.nih.gov/10195184/">quite a famous paper</a>, which proposed how predictive coding might be implemented in brains. What is predictive coding, you may be wondering? It's roughly the idea that your brain is constantly predicting incoming sensory signals, and it generates that prediction as a top-down signal that meets the bottom-up sensory signals. Then the brain computes a difference between the prediction and the actual sensory input, and that difference is sent back up to the "top" where the brain then updates its internal model to make better future predictions.</p>



<p>So that was 25 years ago, and it was focused on how the brain handles sensory information. But Raj just recently published an update to the predictive coding framework, one that incorporates actions and perception, suggests how it might be implemented in the cortex - specifically which cortical layers do what - something he calls "Active predictive coding." So we discuss that new proposal, we also talk about his engineering work on brain-computer interface technologies, like BrainNet, which basically connects two brains together, and like neural co-processors, which use an artificial neural network as a prosthetic that can do things like enhance memories, optimize learning, and help restore brain function after strokes, for example. Finally, we discuss Raj's interest and work on deciphering an ancient Indian text, the mysterious Indus script.</p>



<ul class="wp-block-list">
<li><a href="https://www.rajeshpnrao.com/">Raj's website</a>.</li>



<li>Twitter: <a href="https://x.com/RajeshPNRao">@RajeshPNRao</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.nature.com/articles/s41593-024-01673-9">A sensory–motor theory of the neocortex</a>.</li>



<li><a href="https://arxiv.org/pdf/2012.03378">Brain co-processors: using AI to restore and augment brain function</a>.</li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438818301843">Towards neural co-processors for the brain: combining decoding and encoding in brain–computer interfaces</a>.</li>



<li><a href="https://www.nature.com/articles/s41598-019-41895-7">BrainNet: A Multi-Person Brain-to-Brain Interface for Direct Collaboration Between Brains</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/12/BI-201-transcript.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
7:40 - Predictive coding origins
16:14 - Early appreciation of recurrence
17:08 - Prediction as a general theory of the brain
18:38 - Rao and Ballard 1999
26:32 - Prediction as a general theory of the brain
33:24 - Perception vs action
33:28 - Active predictive coding
45:04 - Evolving to augment our brains
53:03 - BrainNet
57:12 - Neural co-processors
1:11:19 - Decoding the Indus Script
1:20:18 - Transformer models relation to active predictive coding</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1922841/c1e-vzkkt91n7kaqdzn7-7zk6pgvzsvw7-f0k3p2.mp3" length="94607178"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes, full archive, and join the Discord community.











Today I'm in conversation with Rajesh Rao, a distinguished professor of computer science and engineering at the University of Washington, where he also co-directs the Center for Neurotechnology. Back in 1999, Raj and Dana Ballard published what became quite a famous paper, which proposed how predictive coding might be implemented in brains. What is predictive coding, you may be wondering? It's roughly the idea that your brain is constantly predicting incoming sensory signals, and it generates that prediction as a top-down signal that meets the bottom-up sensory signals. Then the brain computes a difference between the prediction and the actual sensory input, and that difference is sent back up to the "top" where the brain then updates its internal model to make better future predictions.



So that was 25 years ago, and it was focused on how the brain handles sensory information. But Raj just recently published an update to the predictive coding framework, one that incorporates actions and perception, suggests how it might be implemented in the cortex - specifically which cortical layers do what - something he calls "Active predictive coding." So we discuss that new proposal, we also talk about his engineering work on brain-computer interface technologies, like BrainNet, which basically connects two brains together, and like neural co-processors, which use an artificial neural network as a prosthetic that can do things like enhance memories, optimize learning, and help restore brain function after strokes, for example. Finally, we discuss Raj's interest and work on deciphering an ancient Indian text, the mysterious Indus script.




Raj's website.



Twitter: @RajeshPNRao.



Related papers

A sensory–motor theory of the neocortex.



Brain co-processors: using AI to restore and augment brain function.



Towards neural co-processors for the brain: combining decoding and encoding in brain–computer interfaces.



BrainNet: A Multi-Person Brain-to-Brain Interface for Direct Collaboration Between Brains.






Read the transcript.



0:00 - Intro
7:40 - Predictive coding origins
16:14 - Early appreciation of recurrence
17:08 - Prediction as a general theory of the brain
18:38 - Rao and Ballard 1999
26:32 - Prediction as a general theory of the brain
33:24 - Perception vs action
33:28 - Active predictive coding
45:04 - Evolving to augment our brains
53:03 - BrainNet
57:12 - Neural co-processors
1:11:19 - Decoding the Indus Script
1:20:18 - Transformer models relation to active predictive coding]]>
                </itunes:summary>
                                                                            <itunes:duration>01:37:22</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 200 Grace Hwang and Joe Monaco: The Future of NeuroAI]]>
                </title>
                <pubDate>Wed, 04 Dec 2024 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1913289</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-200-grace-hwang-and-joe-monaco-the-future-of-neuroai</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Joe Monaco and Grace Hwang co-organized a recent workshop I participated in, the <a href="https://n4solutionsllc.com/brainneuroai/">2024 BRAIN NeuroAI Workshop</a>. You may have heard of the BRAIN Initiative, but in case not, BRAIN is a huge funding effort across many agencies, one of which is the National Institutes of Health, where this recent workshop was held. The BRAIN Initiative began in 2013 under the Obama administration, with the goal to support developing technologies to help understand the human brain, so we can cure brain-based diseases.</p>



<p>BRAIN Initiative just became a decade old, with many successes like recent whole brain connectomes, and discovering the vast array of cell types. Now the question is how to move forward, and one area they are curious about, that perhaps has a lot of potential to support their mission, is the recent convergence of neuroscience and AI... or NeuroAI. The workshop was designed to explore how NeuroAI might contribute moving forward, and to hear from NeuroAI folks how they envision the field moving forward. You'll hear more about that in a moment.</p>





<p>That's one reason I invited Grace and Joe on. Another reason is because they co-wrote a position paper a while back that is impressive as a synthesis of lots of cognitive sciences concepts, but also proposes a specific level of abstraction and scale in brain processes that may serve as a base layer for computation. The paper is called Neurodynamical Computing at the Information Boundaries of Intelligent Systems, and you'll learn more about that in this episode.</p>



<ul class="wp-block-list">
<li><a href="https://www.ninds.nih.gov/about-ninds/who-we-are/staff-directory/joseph-monaco">Joe's NIH page</a>.</li>



<li><a href="https://www.ninds.nih.gov/about-ninds/who-we-are/staff-directory/grace-m-hwang">Grace's NIH page</a>.</li>



<li>Twitter: 
<ul class="wp-block-list">
<li>Joe: <a href="https://x.com/j_d_monaco">@j_d_monaco</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://link.springer.com/article/10.1007/s12559-022-10081-9">Neurodynamical Computing at the Information Boundaries of Intelligent Systems</a>.</li>



<li><a href="https://link.springer.com/article/10.1007/s00422-020-00823-z">Cognitive swarming in complex environments with attractor dynamics and oscillatory computing</a>.</li>



<li><a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006741">Spatial synchronization codes from coupled rate-phase neurons</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-017-01190-3">Oscillators that sync and swarm</a>.</li>
</ul>
</li>



<li>Mentioned
<ul class="wp-block-list">
<li><a href="http://dx.doi.org/10.1016/j.bica.2016.11.002">A historical survey of algorithms and hardware architectures for neural-inspired and neuromorphic computing applications</a>.</li>



<li><a href="http://dx.doi.org/10.1002/hipo.23027">Recalling Lashley and reconsolidating Hebb</a>.</li>



<li>BRAIN NeuroAI Workshop (Nov 12–13)
<ul class="wp-block-list">
<li><a href="https://n4solutionsllc.com/wp-content/uploads/2024/11/NIH_BRAIN_NeuroAI_Workshop_Program_Book_508c.pdf">NIH BRAIN NeuroAI Workshop Program Book</a></li>



<li><a href="https://videocast.nih.gov/watch=55160">NIH VideoCast – Day 1 Recording – BRAIN NeuroAI Workshop</a></li>



<li><a href="https://videocast.nih.gov/watch=55262">NIH VideoCast – Day 2 Recording – BRAIN NeuroAI Workshop</a></li>
</ul>
</li>



<li>Neuromorphic Principles in Biomedicine and Healthcare Workshop (Oct 21–22)
<ul class="wp-block-list">
<li><a href="https://2024.neuro-med.org/">NPBH 2024</a></li>
</ul>
</li>



<li>BRAIN Investigators Meeting 2020 Symposium &amp; Perspective Pap...</li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Joe Monaco and Grace Hwang co-organized a recent workshop I participated in, the 2024 BRAIN NeuroAI Workshop. You may have heard of the BRAIN Initiative, but in case not, BRAIN is a huge funding effort across many agencies, one of which is the National Institutes of Health, where this recent workshop was held. The BRAIN Initiative began in 2013 under the Obama administration, with the goal to support developing technologies to help understand the human brain, so we can cure brain-based diseases.



BRAIN Initiative just became a decade old, with many successes like recent whole brain connectomes, and discovering the vast array of cell types. Now the question is how to move forward, and one area they are curious about, that perhaps has a lot of potential to support their mission, is the recent convergence of neuroscience and AI... or NeuroAI. The workshop was designed to explore how NeuroAI might contribute moving forward, and to hear from NeuroAI folks how they envision the field moving forward. You'll hear more about that in a moment.





That's one reason I invited Grace and Joe on. Another reason is because they co-wrote a position paper a while back that is impressive as a synthesis of lots of cognitive sciences concepts, but also proposes a specific level of abstraction and scale in brain processes that may serve as a base layer for computation. The paper is called Neurodynamical Computing at the Information Boundaries of Intelligent Systems, and you'll learn more about that in this episode.




Joe's NIH page.



Grace's NIH page.



Twitter: 

Joe: @j_d_monaco





Related papers

Neurodynamical Computing at the Information Boundaries of Intelligent Systems.



Cognitive swarming in complex environments with attractor dynamics and oscillatory computing.



Spatial synchronization codes from coupled rate-phase neurons.



Oscillators that sync and swarm.





Mentioned

A historical survey of algorithms and hardware architectures for neural-inspired and neuromorphic computing applications.



Recalling Lashley and reconsolidating Hebb.



BRAIN NeuroAI Workshop (Nov 12–13)

NIH BRAIN NeuroAI Workshop Program Book



NIH VideoCast – Day 1 Recording – BRAIN NeuroAI Workshop



NIH VideoCast – Day 2 Recording – BRAIN NeuroAI Workshop





Neuromorphic Principles in Biomedicine and Healthcare Workshop (Oct 21–22)

NPBH 2024





BRAIN Investigators Meeting 2020 Symposium & Perspective Pap...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 200 Grace Hwang and Joe Monaco: The Future of NeuroAI]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Joe Monaco and Grace Hwang co-organized a recent workshop I participated in, the <a href="https://n4solutionsllc.com/brainneuroai/">2024 BRAIN NeuroAI Workshop</a>. You may have heard of the BRAIN Initiative, but in case not, BRAIN is a huge funding effort across many agencies, one of which is the National Institutes of Health, where this recent workshop was held. The BRAIN Initiative began in 2013 under the Obama administration, with the goal to support developing technologies to help understand the human brain, so we can cure brain-based diseases.</p>



<p>BRAIN Initiative just became a decade old, with many successes like recent whole brain connectomes, and discovering the vast array of cell types. Now the question is how to move forward, and one area they are curious about, that perhaps has a lot of potential to support their mission, is the recent convergence of neuroscience and AI... or NeuroAI. The workshop was designed to explore how NeuroAI might contribute moving forward, and to hear from NeuroAI folks how they envision the field moving forward. You'll hear more about that in a moment.</p>





<p>That's one reason I invited Grace and Joe on. Another reason is because they co-wrote a position paper a while back that is impressive as a synthesis of lots of cognitive sciences concepts, but also proposes a specific level of abstraction and scale in brain processes that may serve as a base layer for computation. The paper is called Neurodynamical Computing at the Information Boundaries of Intelligent Systems, and you'll learn more about that in this episode.</p>



<ul class="wp-block-list">
<li><a href="https://www.ninds.nih.gov/about-ninds/who-we-are/staff-directory/joseph-monaco">Joe's NIH page</a>.</li>



<li><a href="https://www.ninds.nih.gov/about-ninds/who-we-are/staff-directory/grace-m-hwang">Grace's NIH page</a>.</li>



<li>Twitter: 
<ul class="wp-block-list">
<li>Joe: <a href="https://x.com/j_d_monaco">@j_d_monaco</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://link.springer.com/article/10.1007/s12559-022-10081-9">Neurodynamical Computing at the Information Boundaries of Intelligent Systems</a>.</li>



<li><a href="https://link.springer.com/article/10.1007/s00422-020-00823-z">Cognitive swarming in complex environments with attractor dynamics and oscillatory computing</a>.</li>



<li><a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006741">Spatial synchronization codes from coupled rate-phase neurons</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-017-01190-3">Oscillators that sync and swarm</a>.</li>
</ul>
</li>



<li>Mentioned
<ul class="wp-block-list">
<li><a href="http://dx.doi.org/10.1016/j.bica.2016.11.002">A historical survey of algorithms and hardware architectures for neural-inspired and neuromorphic computing applications</a>.</li>



<li><a href="http://dx.doi.org/10.1002/hipo.23027">Recalling Lashley and reconsolidating Hebb</a>.</li>



<li>BRAIN NeuroAI Workshop (Nov 12–13)
<ul class="wp-block-list">
<li><a href="https://n4solutionsllc.com/wp-content/uploads/2024/11/NIH_BRAIN_NeuroAI_Workshop_Program_Book_508c.pdf">NIH BRAIN NeuroAI Workshop Program Book</a></li>



<li><a href="https://videocast.nih.gov/watch=55160">NIH VideoCast – Day 1 Recording – BRAIN NeuroAI Workshop</a></li>



<li><a href="https://videocast.nih.gov/watch=55262">NIH VideoCast – Day 2 Recording – BRAIN NeuroAI Workshop</a></li>
</ul>
</li>



<li>Neuromorphic Principles in Biomedicine and Healthcare Workshop (Oct 21–22)
<ul class="wp-block-list">
<li><a href="https://2024.neuro-med.org/">NPBH 2024</a></li>
</ul>
</li>



<li>BRAIN Investigators Meeting 2020 Symposium &amp; Perspective Paper
<ul class="wp-block-list">
<li><a href="https://www.youtube.com/watch?v=2jy1ENYHRAw">BRAIN 2020 Symposium on Dynamical Systems Neuroscience and Machine Learning</a> (YouTube)</li>



<li><a href="https://link.springer.com/article/10.1007/s12559-022-10081-9">Neurodynamical Computing at the Information Boundaries of Intelligent Systems | Cognitive Computation</a></li>
</ul>
</li>



<li>NSF/CIRC
<ul class="wp-block-list">
<li><a href="https://new.nsf.gov/funding/opportunities/circ-community-infrastructure-research-computer-information">Community Infrastructure for Research in Computer and Information Science and Engineering (CIRC) | NSF - National Science Foundation</a></li>



<li><a href="https://ai.utsa.edu/thor/">THOR Neuromorphic Commons - Matrix: The UTSA AI Consortium for Human Well-Being</a></li>
</ul>
</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/12/BI-200-transcript-final.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
25:45 - NeuroAI Workshop - neuromorphics
33:31 - Neuromorphics and theory
49:19 - Reflections on the workshop
54:22 - Neurodynamical computing and information boundaries
1:01:04 - Perceptual control theory
1:08:56 - Digital twins and neural foundation models
1:14:02 - Base layer of computation</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1913289/c1e-1dmmfjzv4vfw4wzk-v6zm2oo4f5o4-h7dqdf.mp3" length="94071308"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Joe Monaco and Grace Hwang co-organized a recent workshop I participated in, the 2024 BRAIN NeuroAI Workshop. You may have heard of the BRAIN Initiative, but in case not, BRAIN is a huge funding effort across many agencies, one of which is the National Institutes of Health, where this recent workshop was held. The BRAIN Initiative began in 2013 under the Obama administration, with the goal to support developing technologies to help understand the human brain, so we can cure brain-based diseases.



BRAIN Initiative just became a decade old, with many successes like recent whole brain connectomes, and discovering the vast array of cell types. Now the question is how to move forward, and one area they are curious about, that perhaps has a lot of potential to support their mission, is the recent convergence of neuroscience and AI... or NeuroAI. The workshop was designed to explore how NeuroAI might contribute moving forward, and to hear from NeuroAI folks how they envision the field moving forward. You'll hear more about that in a moment.





That's one reason I invited Grace and Joe on. Another reason is because they co-wrote a position paper a while back that is impressive as a synthesis of lots of cognitive sciences concepts, but also proposes a specific level of abstraction and scale in brain processes that may serve as a base layer for computation. The paper is called Neurodynamical Computing at the Information Boundaries of Intelligent Systems, and you'll learn more about that in this episode.




Joe's NIH page.



Grace's NIH page.



Twitter: 

Joe: @j_d_monaco





Related papers

Neurodynamical Computing at the Information Boundaries of Intelligent Systems.



Cognitive swarming in complex environments with attractor dynamics and oscillatory computing.



Spatial synchronization codes from coupled rate-phase neurons.



Oscillators that sync and swarm.





Mentioned

A historical survey of algorithms and hardware architectures for neural-inspired and neuromorphic computing applications.



Recalling Lashley and reconsolidating Hebb.



BRAIN NeuroAI Workshop (Nov 12–13)

NIH BRAIN NeuroAI Workshop Program Book



NIH VideoCast – Day 1 Recording – BRAIN NeuroAI Workshop



NIH VideoCast – Day 2 Recording – BRAIN NeuroAI Workshop





Neuromorphic Principles in Biomedicine and Healthcare Workshop (Oct 21–22)

NPBH 2024





BRAIN Investigators Meeting 2020 Symposium & Perspective Pap...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:37:11</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 199 Hessam Akhlaghpour: Natural Universal Computation]]>
                </title>
                <pubDate>Tue, 26 Nov 2024 05:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1908491</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-199-hessam-akhlaghpour-natural-universal-computation-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: <a href="https://www.thetransmitter.org/newsletters/">https://www.thetransmitter.org/newsletters/</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Hessam Akhlaghpour is a postdoctoral researcher at Rockefeller University in the Maimon lab. His experimental work is in fly neuroscience mostly studying spatial memories in fruit flies. However, we are going to be talking about a different (although somewhat related) side of his postdoctoral research. This aspect of his work involves theoretical explorations of molecular computation, which are deeply inspired by Randy Gallistel and Adam King's book Memory and the Computational Brain. Randy has been on the podcast before to discuss his ideas that memory needs to be stored in something more stable than the synapses between neurons, and how that something could be genetic material like RNA. When Hessam read this book, he was re-inspired to think of the brain the way he used to think of it before experimental neuroscience challenged his views. It re-inspired him to think of the brain as a computational system. But it also led to what we discuss today, the idea that RNA has the capacity for universal computation, and Hessam's development of how that might happen. So we discuss that background and story, why universal computation hasn't been discovered in organisms yet, since surely evolution has stumbled upon it, and how RNA and combinatory logic could implement universal computation in nature.</p>



<ul class="wp-block-list">
<li><a href="https://www.akhlaghpour.info/">Hessam's website</a>.</li>



<li><a href="https://maimonlab.rockefeller.edu/">Maimon Lab</a>.</li>



<li>Twitter: <a href="https://x.com/theHessam">@theHessam</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.sciencedirect.com/science/article/pii/S0022519321004045">An RNA-based theory of natural universal computation</a>.</li>



<li><a href="https://arxiv.org/abs/2209.04923">The molecular memory code and synaptic plasticity: a synthesis</a>.</li>



<li><a href="https://doi.org/10.1126/science.adf3481">Lifelong persistence of nuclear RNAs in the mouse brain</a>.</li>



<li><a href="https://sites.santafe.edu/~moore/pubs/FlowsMaps.pdf">Cris Moore's conjecture #5 in this 1998 paper</a>.</li>



<li>(The Gallistel book): <a href="https://onlinelibrary.wiley.com/doi/book/10.1002/9781444310498">Memory and the Computational Brain: Why Cognitive Science Will Transform Neuroscience</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a></li>



<li><a href="https://braininspired.co/podcast/172/">BI 172 David Glanzman: Memory All The Way Down</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/11/BI-199-transcript.pdf" target="_blank" rel="noreferrer noopener">transcript</a>. </p>



<p>0:00 - Intro
4:44 - Hessam's background
11:50 - Randy Gallistel's book
14:43 - Information in the brain
17:51 - Hessam's turn to universal computation
35:30 - AI and universal computation
40:09 - Universa...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: https://www.thetransmitter.org/newsletters/



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Hessam Akhlaghpour is a postdoctoral researcher at Rockefeller University in the Maimon lab. His experimental work is in fly neuroscience mostly studying spatial memories in fruit flies. However, we are going to be talking about a different (although somewhat related) side of his postdoctoral research. This aspect of his work involves theoretical explorations of molecular computation, which are deeply inspired by Randy Gallistel and Adam King's book Memory and the Computational Brain. Randy has been on the podcast before to discuss his ideas that memory needs to be stored in something more stable than the synapses between neurons, and how that something could be genetic material like RNA. When Hessam read this book, he was re-inspired to think of the brain the way he used to think of it before experimental neuroscience challenged his views. It re-inspired him to think of the brain as a computational system. But it also led to what we discuss today, the idea that RNA has the capacity for universal computation, and Hessam's development of how that might happen. So we discuss that background and story, why universal computation hasn't been discovered in organisms yet, since surely evolution has stumbled upon it, and how RNA and combinatory logic could implement universal computation in nature.




Hessam's website.



Maimon Lab.



Twitter: @theHessam.



Related papers

An RNA-based theory of natural universal computation.



The molecular memory code and synaptic plasticity: a synthesis.



Lifelong persistence of nuclear RNAs in the mouse brain.



Cris Moore's conjecture #5 in this 1998 paper.



(The Gallistel book): Memory and the Computational Brain: Why Cognitive Science Will Transform Neuroscience.





Related episodes

BI 126 Randy Gallistel: Where Is the Engram?



BI 172 David Glanzman: Memory All The Way Down






Read the transcript. 



0:00 - Intro
4:44 - Hessam's background
11:50 - Randy Gallistel's book
14:43 - Information in the brain
17:51 - Hessam's turn to universal computation
35:30 - AI and universal computation
40:09 - Universa...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 199 Hessam Akhlaghpour: Natural Universal Computation]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: <a href="https://www.thetransmitter.org/newsletters/">https://www.thetransmitter.org/newsletters/</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Hessam Akhlaghpour is a postdoctoral researcher at Rockefeller University in the Maimon lab. His experimental work is in fly neuroscience mostly studying spatial memories in fruit flies. However, we are going to be talking about a different (although somewhat related) side of his postdoctoral research. This aspect of his work involves theoretical explorations of molecular computation, which are deeply inspired by Randy Gallistel and Adam King's book Memory and the Computational Brain. Randy has been on the podcast before to discuss his ideas that memory needs to be stored in something more stable than the synapses between neurons, and how that something could be genetic material like RNA. When Hessam read this book, he was re-inspired to think of the brain the way he used to think of it before experimental neuroscience challenged his views. It re-inspired him to think of the brain as a computational system. But it also led to what we discuss today, the idea that RNA has the capacity for universal computation, and Hessam's development of how that might happen. So we discuss that background and story, why universal computation hasn't been discovered in organisms yet, since surely evolution has stumbled upon it, and how RNA and combinatory logic could implement universal computation in nature.</p>



<ul class="wp-block-list">
<li><a href="https://www.akhlaghpour.info/">Hessam's website</a>.</li>



<li><a href="https://maimonlab.rockefeller.edu/">Maimon Lab</a>.</li>



<li>Twitter: <a href="https://x.com/theHessam">@theHessam</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.sciencedirect.com/science/article/pii/S0022519321004045">An RNA-based theory of natural universal computation</a>.</li>



<li><a href="https://arxiv.org/abs/2209.04923">The molecular memory code and synaptic plasticity: a synthesis</a>.</li>



<li><a href="https://doi.org/10.1126/science.adf3481">Lifelong persistence of nuclear RNAs in the mouse brain</a>.</li>



<li><a href="https://sites.santafe.edu/~moore/pubs/FlowsMaps.pdf">Cris Moore's conjecture #5 in this 1998 paper</a>.</li>



<li>(The Gallistel book): <a href="https://onlinelibrary.wiley.com/doi/book/10.1002/9781444310498">Memory and the Computational Brain: Why Cognitive Science Will Transform Neuroscience</a>.</li>
</ul>
</li>



<li>Related episodes
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a></li>



<li><a href="https://braininspired.co/podcast/172/">BI 172 David Glanzman: Memory All The Way Down</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/11/BI-199-transcript.pdf" target="_blank" rel="noreferrer noopener">transcript</a>. </p>



<p>0:00 - Intro
4:44 - Hessam's background
11:50 - Randy Gallistel's book
14:43 - Information in the brain
17:51 - Hessam's turn to universal computation
35:30 - AI and universal computation
40:09 - Universal computation to solve intelligence
44:22 - Connecting sub and super molecular
50:10 - Junk DNA
56:42 - Genetic material for coding
1:06:37 - RNA and combinatory logic
1:35:14 - Outlook
1:42:11 - Reflecting on the molecular world</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1908491/c1e-3g11t5o3nqc88g4j-jpjz344otp8-vc0h5n.mp3" length="105925571"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.



Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: https://www.thetransmitter.org/newsletters/



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Hessam Akhlaghpour is a postdoctoral researcher at Rockefeller University in the Maimon lab. His experimental work is in fly neuroscience mostly studying spatial memories in fruit flies. However, we are going to be talking about a different (although somewhat related) side of his postdoctoral research. This aspect of his work involves theoretical explorations of molecular computation, which are deeply inspired by Randy Gallistel and Adam King's book Memory and the Computational Brain. Randy has been on the podcast before to discuss his ideas that memory needs to be stored in something more stable than the synapses between neurons, and how that something could be genetic material like RNA. When Hessam read this book, he was re-inspired to think of the brain the way he used to think of it before experimental neuroscience challenged his views. It re-inspired him to think of the brain as a computational system. But it also led to what we discuss today, the idea that RNA has the capacity for universal computation, and Hessam's development of how that might happen. So we discuss that background and story, why universal computation hasn't been discovered in organisms yet, since surely evolution has stumbled upon it, and how RNA and combinatory logic could implement universal computation in nature.




Hessam's website.



Maimon Lab.



Twitter: @theHessam.



Related papers

An RNA-based theory of natural universal computation.



The molecular memory code and synaptic plasticity: a synthesis.



Lifelong persistence of nuclear RNAs in the mouse brain.



Cris Moore's conjecture #5 in this 1998 paper.



(The Gallistel book): Memory and the Computational Brain: Why Cognitive Science Will Transform Neuroscience.





Related episodes

BI 126 Randy Gallistel: Where Is the Engram?



BI 172 David Glanzman: Memory All The Way Down






Read the transcript. 



0:00 - Intro
4:44 - Hessam's background
11:50 - Randy Gallistel's book
14:43 - Information in the brain
17:51 - Hessam's turn to universal computation
35:30 - AI and universal computation
40:09 - Universa...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:49:07</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 198 Tony Zador: Neuroscience Principles to Improve AI]]>
                </title>
                <pubDate>Mon, 11 Nov 2024 15:28:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1882283</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-198-tony-zador-neuroscience-principles-to-improve-ai</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>





<p>Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: <a href="https://www.thetransmitter.org/newsletters/">https://www.thetransmitter.org/newsletters/</a></p>



<p>To explore more neuroscience news and perspectives, visit thetransmitter.org.</p>



<p>Tony Zador runs the Zador lab at Cold Spring Harbor Laboratory. You've heard him on Brain Inspired a few times in the past, most recently in a panel discussion I moderated at this past COSYNE conference - a conference Tony co-founded 20 years ago. As you'll hear, Tony's current and past interests and research endeavors are of a wide variety, but today we focus mostly on his thoughts on NeuroAI.</p>



<p>We're in a huge AI hype cycle right now, for good reason, and there's a lot of talk in the neuroscience world about whether neuroscience has anything of value to provide AI engineers - and how much value, if any, neuroscience has provided in the past.</p>



<p>Tony is team neuroscience. You'll hear him discuss why in this episode, especially when it comes to ways in which development and evolution might inspire better data efficiency, looking to animals in general to understand how they coordinate numerous objective functions to achieve their intelligent behaviors - something Tony calls alignment - and using spikes in AI models to increase energy efficiency.</p>



<ul class="wp-block-list">
<li><a href="https://zadorlab.labsites.cshl.edu/">Zador Lab</a></li>



<li>Twitter: <a href="https://twitter.com/TonyZador">@TonyZador</a></li>



<li>Previous episodes:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/187/">BI 187: COSYNE 2024 Neuro-AI Panel</a>.</li>



<li><a href="https://braininspired.co/podcast/125/">BI 125 Doris Tsao, Tony Zador, Blake Richards: NAISys</a></li>



<li><a href="https://braininspired.co/podcast/34/">BI 034 Tony Zador: How DNA and Evolution Can Inform AI</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://zadorlab.labsites.cshl.edu/wp-content/uploads/sites/59/2024/08/Catalyzing-Next-Generation-Artificial-Intelligence-through-NeuroAI.pdf">Catalyzing next-generation Artificial Intelligence through NeuroAI</a>.</li>



<li><a href="https://zadorlab.labsites.cshl.edu/wp-content/uploads/sites/59/2024/09/shuvaev-et-al-2024-Encoding-innate-ability-through-a-genomic-bottleneck-.pdf">Encoding innate ability through a genomic bottleneck</a>.</li>
</ul>
</li>



<li>Essays
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/neuroai/neuroai-a-field-born-from-the-symbiosis-between-neuroscience-ai/">NeuroAI: A field born from the symbiosis between neuroscience, AI</a>.</li>



<li><a href="https://www.thetransmitter.org/neuroai/what-the-brain-can-teach-artificial-neural-networks/">What the brain can teach artificial neural networks</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/11/BI-198-transcript-final-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:28 - "Neuro-AI"
12:48 - Visual cognition history
18:24 - Information theory in neuroscience
20:47 - Necessary steps for progress
24:34 - Neuro-AI models and cognition
35:47 - Animals for inspiring AI
41:48 - What we want AI to do
46:01 - Developme...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.





Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: https://www.thetransmitter.org/newsletters/



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Tony Zador runs the Zador lab at Cold Spring Harbor Laboratory. You've heard him on Brain Inspired a few times in the past, most recently in a panel discussion I moderated at this past COSYNE conference - a conference Tony co-founded 20 years ago. As you'll hear, Tony's current and past interests and research endeavors are of a wide variety, but today we focus mostly on his thoughts on NeuroAI.



We're in a huge AI hype cycle right now, for good reason, and there's a lot of talk in the neuroscience world about whether neuroscience has anything of value to provide AI engineers - and how much value, if any, neuroscience has provided in the past.



Tony is team neuroscience. You'll hear him discuss why in this episode, especially when it comes to ways in which development and evolution might inspire better data efficiency, looking to animals in general to understand how they coordinate numerous objective functions to achieve their intelligent behaviors - something Tony calls alignment - and using spikes in AI models to increase energy efficiency.




Zador Lab



Twitter: @TonyZador



Previous episodes:

BI 187: COSYNE 2024 Neuro-AI Panel.



BI 125 Doris Tsao, Tony Zador, Blake Richards: NAISys



BI 034 Tony Zador: How DNA and Evolution Can Inform AI





Related papers

Catalyzing next-generation Artificial Intelligence through NeuroAI.



Encoding innate ability through a genomic bottleneck.





Essays

NeuroAI: A field born from the symbiosis between neuroscience, AI.



What the brain can teach artificial neural networks.






Read the transcript.



0:00 - Intro
3:28 - "Neuro-AI"
12:48 - Visual cognition history
18:24 - Information theory in neuroscience
20:47 - Necessary steps for progress
24:34 - Neuro-AI models and cognition
35:47 - Animals for inspiring AI
41:48 - What we want AI to do
46:01 - Developme...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 198 Tony Zador: Neuroscience Principles to Improve AI]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>





<p>Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: <a href="https://www.thetransmitter.org/newsletters/">https://www.thetransmitter.org/newsletters/</a></p>



<p>To explore more neuroscience news and perspectives, visit thetransmitter.org.</p>



<p>Tony Zador runs the Zador lab at Cold Spring Harbor Laboratory. You've heard him on Brain Inspired a few times in the past, most recently in a panel discussion I moderated at this past COSYNE conference - a conference Tony co-founded 20 years ago. As you'll hear, Tony's current and past interests and research endeavors are of a wide variety, but today we focus mostly on his thoughts on NeuroAI.</p>



<p>We're in a huge AI hype cycle right now, for good reason, and there's a lot of talk in the neuroscience world about whether neuroscience has anything of value to provide AI engineers - and how much value, if any, neuroscience has provided in the past.</p>



<p>Tony is team neuroscience. You'll hear him discuss why in this episode, especially when it comes to ways in which development and evolution might inspire better data efficiency, looking to animals in general to understand how they coordinate numerous objective functions to achieve their intelligent behaviors - something Tony calls alignment - and using spikes in AI models to increase energy efficiency.</p>



<ul class="wp-block-list">
<li><a href="https://zadorlab.labsites.cshl.edu/">Zador Lab</a></li>



<li>Twitter: <a href="https://twitter.com/TonyZador">@TonyZador</a></li>



<li>Previous episodes:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/187/">BI 187: COSYNE 2024 Neuro-AI Panel</a>.</li>



<li><a href="https://braininspired.co/podcast/125/">BI 125 Doris Tsao, Tony Zador, Blake Richards: NAISys</a></li>



<li><a href="https://braininspired.co/podcast/34/">BI 034 Tony Zador: How DNA and Evolution Can Inform AI</a></li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://zadorlab.labsites.cshl.edu/wp-content/uploads/sites/59/2024/08/Catalyzing-Next-Generation-Artificial-Intelligence-through-NeuroAI.pdf">Catalyzing next-generation Artificial Intelligence through NeuroAI</a>.</li>



<li><a href="https://zadorlab.labsites.cshl.edu/wp-content/uploads/sites/59/2024/09/shuvaev-et-al-2024-Encoding-innate-ability-through-a-genomic-bottleneck-.pdf">Encoding innate ability through a genomic bottleneck</a>.</li>
</ul>
</li>



<li>Essays
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/neuroai/neuroai-a-field-born-from-the-symbiosis-between-neuroscience-ai/">NeuroAI: A field born from the symbiosis between neuroscience, AI</a>.</li>



<li><a href="https://www.thetransmitter.org/neuroai/what-the-brain-can-teach-artificial-neural-networks/">What the brain can teach artificial neural networks</a>.</li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/11/BI-198-transcript-final-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>



<p>0:00 - Intro
3:28 - "Neuro-AI"
12:48 - Visual cognition history
18:24 - Information theory in neuroscience
20:47 - Necessary steps for progress
24:34 - Neuro-AI models and cognition
35:47 - Animals for inspiring AI
41:48 - What we want AI to do
46:01 - Development and AI
59:03 - Robots
1:25:10 - Catalyzing the next generation of AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1882283/c1e-5k88hm3g4vtz5467-qd40dwozb8od-gvwrkx.mp3" length="92314860"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.





Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released: https://www.thetransmitter.org/newsletters/



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Tony Zador runs the Zador lab at Cold Spring Harbor Laboratory. You've heard him on Brain Inspired a few times in the past, most recently in a panel discussion I moderated at this past COSYNE conference - a conference Tony co-founded 20 years ago. As you'll hear, Tony's current and past interests and research endeavors are of a wide variety, but today we focus mostly on his thoughts on NeuroAI.



We're in a huge AI hype cycle right now, for good reason, and there's a lot of talk in the neuroscience world about whether neuroscience has anything of value to provide AI engineers - and how much value, if any, neuroscience has provided in the past.



Tony is team neuroscience. You'll hear him discuss why in this episode, especially when it comes to ways in which development and evolution might inspire better data efficiency, looking to animals in general to understand how they coordinate numerous objective functions to achieve their intelligent behaviors - something Tony calls alignment - and using spikes in AI models to increase energy efficiency.




Zador Lab



Twitter: @TonyZador



Previous episodes:

BI 187: COSYNE 2024 Neuro-AI Panel.



BI 125 Doris Tsao, Tony Zador, Blake Richards: NAISys



BI 034 Tony Zador: How DNA and Evolution Can Inform AI





Related papers

Catalyzing next-generation Artificial Intelligence through NeuroAI.



Encoding innate ability through a genomic bottleneck.





Essays

NeuroAI: A field born from the symbiosis between neuroscience, AI.



What the brain can teach artificial neural networks.






Read the transcript.



0:00 - Intro
3:28 - "Neuro-AI"
12:48 - Visual cognition history
18:24 - Information theory in neuroscience
20:47 - Necessary steps for progress
24:34 - Neuro-AI models and cognition
35:47 - Animals for inspiring AI
41:48 - What we want AI to do
46:01 - Developme...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:35:04</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 197 Karen Adolph: How Babies Learn to Move and Think]]>
                </title>
                <pubDate>Fri, 25 Oct 2024 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://permalink.castos.com/podcast/330/episode/1867047</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-197-karen-adolph-how-babies-learn-to-move-and-think-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>





<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released<a href="https://www.thetransmitter.org/newsletters/">.</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Karen Adolph runs the <a href="https://www.nyuactionlab.com/">Infant Action Lab</a> at NYU, where she studies how our motor behaviors develop from infancy onward. We discuss how observing babies at different stages of development illuminates how movement and cognition develop in humans, how variability and embodiment are key to that development, and the importance of studying behavior in real-world settings as opposed to restricted laboratory settings. We also explore how these principles and simulations can inspire advances in intelligent robots. Karen has a long-standing interest in ecological psychology, and she shares some stories of her time studying under Eleanor Gibson and other mentors.</p>





<p>Finally, we get a surprise visit from her partner Mark Blumberg, with whom she co-authored an opinion piece arguing that "motor cortex" doesn't start off with a motor function, oddly enough, but instead processes sensory information during the first period of animals' lives.</p>



<ul class="wp-block-list">
<li><a href="https://www.nyuactionlab.com/">Infant Action Lab</a> (Karen Adolph's lab)</li>



<li><a href="https://blumberg.lab.uiowa.edu/">Sleep and Behavioral Development Lab</a> (Mark Blumberg's lab)</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.annualreviews.org/content/journals/10.1146/annurev-psych-010418-102836">Motor Development: Embodied, Embedded, Enculturated, and Enabling</a></li>



<li><a href="https://www.jstor.org/stable/26871620">An Ecological Approach to Learning in (Not and) Development</a></li>



<li><a href="https://75f52ccb-a7bc-4767-bded-13af6094bc0e.usrfiles.com/ugd/75f52c_cf43c0b3cdf9496791bf515f54d4b7cc.pdf">An update of the development of motor behavior</a></li>



<li><a href="https://drive.google.com/file/d/1QjNdXakZAZyy0FsuBWoinzDQMi5uuz3j/view">Protracted development of motor cortex constrains rich interpretations of infant cognition</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/10/BI-197-final-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.





Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Karen Adolph runs the Infant Action Lab at NYU, where she studies how our motor behaviors develop from infancy onward. We discuss how observing babies at different stages of development illuminates how movement and cognition develop in humans, how variability and embodiment are key to that development, and the importance of studying behavior in real-world settings as opposed to restricted laboratory settings. We also explore how these principles and simulations can inspire advances in intelligent robots. Karen has a long-standing interest in ecological psychology, and she shares some stories of her time studying under Eleanor Gibson and other mentors.





Finally, we get a surprise visit from her partner Mark Blumberg, with whom she co-authored an opinion piece arguing that "motor cortex" doesn't start off with a motor function, oddly enough, but instead processes sensory information during the first period of animals' lives.




Infant Action Lab (Karen Adolph's lab)



Sleep and Behavioral Development Lab (Mark Blumberg's lab)



Related papers

Motor Development: Embodied, Embedded, Enculturated, and Enabling



An Ecological Approach to Learning in (Not and) Development



An update of the development of motor behavior



Protracted development of motor cortex constrains rich interpretations of infant cognition






Read the transcript.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 197 Karen Adolph: How Babies Learn to Move and Think]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><em>The Transmitter</em> is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>





<p>Sign up for the <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released<a href="https://www.thetransmitter.org/newsletters/">.</a></p>



<p>To explore more neuroscience news and perspectives, visit <a href="http://thetransmitter.org">thetransmitter.org</a>.</p>



<p>Karen Adolph runs the <a href="https://www.nyuactionlab.com/">Infant Action Lab</a> at NYU, where she studies how our motor behaviors develop from infancy onward. We discuss how observing babies at different stages of development illuminates how movement and cognition develop in humans, how variability and embodiment are key to that development, and the importance of studying behavior in real-world settings as opposed to restricted laboratory settings. We also explore how these principles and simulations can inspire advances in intelligent robots. Karen has a long-standing interest in ecological psychology, and she shares some stories of her time studying under Eleanor Gibson and other mentors.</p>





<p>Finally, we get a surprise visit from her partner Mark Blumberg, with whom she co-authored an opinion piece arguing that "motor cortex" doesn't start off with a motor function, oddly enough, but instead processes sensory information during the first period of animals' lives.</p>



<ul class="wp-block-list">
<li><a href="https://www.nyuactionlab.com/">Infant Action Lab</a> (Karen Adolph's lab)</li>



<li><a href="https://blumberg.lab.uiowa.edu/">Sleep and Behavioral Development Lab</a> (Mark Blumberg's lab)</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.annualreviews.org/content/journals/10.1146/annurev-psych-010418-102836">Motor Development: Embodied, Embedded, Enculturated, and Enabling</a></li>



<li><a href="https://www.jstor.org/stable/26871620">An Ecological Approach to Learning in (Not and) Development</a></li>



<li><a href="https://75f52ccb-a7bc-4767-bded-13af6094bc0e.usrfiles.com/ugd/75f52c_cf43c0b3cdf9496791bf515f54d4b7cc.pdf">An update of the development of motor behavior</a></li>



<li><a href="https://drive.google.com/file/d/1QjNdXakZAZyy0FsuBWoinzDQMi5uuz3j/view">Protracted development of motor cortex constrains rich interpretations of infant cognition</a></li>
</ul>
</li>
</ul>



<p>Read the <a href="https://www.thetransmitter.org/wp-content/uploads/2024/10/BI-197-final-1.pdf" target="_blank" rel="noreferrer noopener">transcript</a>.</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1867047/c1e-5k88hm6n06bn10n9-5zkd7xkwuzr-o2f9qm.mp3" length="86222937"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.



Read more about our partnership.





Sign up for the “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.



Karen Adolph runs the Infant Action Lab at NYU, where she studies how our motor behaviors develop from infancy onward. We discuss how observing babies at different stages of development illuminates how movement and cognition develop in humans, how variability and embodiment are key to that development, and the importance of studying behavior in real-world settings as opposed to restricted laboratory settings. We also explore how these principles and simulations can inspire advances in intelligent robots. Karen has a long-standing interest in ecological psychology, and she shares some stories of her time studying under Eleanor Gibson and other mentors.





Finally, we get a surprise visit from her partner Mark Blumberg, with whom she co-authored an opinion piece arguing that "motor cortex" doesn't start off with a motor function, oddly enough, but instead processes sensory information during the first period of animals' lives.




Infant Action Lab (Karen Adolph's lab)



Sleep and Behavioral Development Lab (Mark Blumberg's lab)



Related papers

Motor Development: Embodied, Embedded, Enculturated, and Enabling



An Ecological Approach to Learning in (Not and) Development



An update of the development of motor behavior



Protracted development of motor cortex constrains rich interpretations of infant cognition






Read the transcript.]]>
                </itunes:summary>
                                                                            <itunes:duration>01:29:31</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 196 Cristina Savin and Tim Vogels with Gaute Einevoll and Mikkel Lepperød]]>
                </title>
                <pubDate>Fri, 11 Oct 2024 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://permalink.castos.com/podcast/330/episode/1853735</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-196-cristina-savin-and-tim-vogels-with-gaute-einevoll-and-mikkel-lepperod-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists. </p>





<p>This is the second conversation I had while teamed up with Gaute Einevoll at <a href="https://www.mn.uio.no/ccse/english/about/news-and-events/news/navigating-the-future-of-neuroai.html">a workshop on NeuroAI</a> in Norway. In this episode, Gaute and I are joined by <a href="https://csavin.wixsite.com/savinlab">Cristina Savin</a> and <a href="https://vogelslab.org/people/">Tim Vogels</a>. Cristina shares how her lab uses recurrent neural networks to study learning, while Tim talks about his long-standing research on synaptic plasticity and how AI tools are now helping to explore the vast space of possible plasticity rules.</p>



<p>We touch on how deep learning has changed the landscape, enhancing our research but also creating challenges with the "fashion-driven" nature of science today. We also reflect on how these new tools have changed the way we think about brain function without fundamentally altering the structure of our questions.</p>



<p>Be sure to check out Gaute's <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a> podcast as well!</p>







<ul class="wp-block-list">
<li><a href="https://lepmik.github.io/">Mikkel Lepperød</a></li>



<li><a href="https://csavin.wixsite.com/savinlab">Cristina Savin</a></li>



<li><a href="https://vogelslab.org/people/">Tim Vogels</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/tpvogels">@TPVogels</a></li>
</ul>
</li>



<li><a href="https://www.mn.uio.no/fysikk/english/people/aca/geinevol/index.html">Gaute Einevoll</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/GauteEinevoll">@GauteEinevoll</a></li>



<li>Gaute's <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a> podcast.</li>
</ul>
</li>



<li><a href="https://www.mn.uio.no/ccse/english/about/news-and-events/events/2024/neuro-ai-workshop.html" target="_blank" rel="noreferrer noopener">Validating models: How would success in NeuroAI look like?</a></li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2024/10/BI-196-final.pdf">Read the transcript</a>, provided by <em>The Transmitter.</em></p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists. 





This is the second conversation I had while teamed up with Gaute Einevoll at a workshop on NeuroAI in Norway. In this episode, Gaute and I are joined by Cristina Savin and Tim Vogels. Cristina shares how her lab uses recurrent neural networks to study learning, while Tim talks about his long-standing research on synaptic plasticity and how AI tools are now helping to explore the vast space of possible plasticity rules.



We touch on how deep learning has changed the landscape, enhancing our research but also creating challenges with the "fashion-driven" nature of science today. We also reflect on how these new tools have changed the way we think about brain function without fundamentally altering the structure of our questions.



Be sure to check out Gaute's Theoretical Neuroscience podcast as well!








Mikkel Lepperød



Cristina Savin



Tim Vogels

Twitter: @TPVogels





Gaute Einevoll

Twitter: @GauteEinevoll



Gaute's Theoretical Neuroscience podcast.





Validating models: How would success in NeuroAI look like?




Read the transcript, provided by The Transmitter.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 196 Cristina Savin and Tim Vogels with Gaute Einevoll and Mikkel Lepperød]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists. </p>





<p>This is the second conversation I had while teamed up with Gaute Einevoll at <a href="https://www.mn.uio.no/ccse/english/about/news-and-events/news/navigating-the-future-of-neuroai.html">a workshop on NeuroAI</a> in Norway. In this episode, Gaute and I are joined by <a href="https://csavin.wixsite.com/savinlab">Cristina Savin</a> and <a href="https://vogelslab.org/people/">Tim Vogels</a>. Cristina shares how her lab uses recurrent neural networks to study learning, while Tim talks about his long-standing research on synaptic plasticity and how AI tools are now helping to explore the vast space of possible plasticity rules.</p>



<p>We touch on how deep learning has changed the landscape, enhancing our research but also creating challenges with the "fashion-driven" nature of science today. We also reflect on how these new tools have changed the way we think about brain function without fundamentally altering the structure of our questions.</p>



<p>Be sure to check out Gaute's <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a> podcast as well!</p>







<ul class="wp-block-list">
<li><a href="https://lepmik.github.io/">Mikkel Lepperød</a></li>



<li><a href="https://csavin.wixsite.com/savinlab">Cristina Savin</a></li>



<li><a href="https://vogelslab.org/people/">Tim Vogels</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/tpvogels">@TPVogels</a></li>
</ul>
</li>



<li><a href="https://www.mn.uio.no/fysikk/english/people/aca/geinevol/index.html">Gaute Einevoll</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/GauteEinevoll">@GauteEinevoll</a></li>



<li>Gaute's <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a> podcast.</li>
</ul>
</li>



<li><a href="https://www.mn.uio.no/ccse/english/about/news-and-events/events/2024/neuro-ai-workshop.html" target="_blank" rel="noreferrer noopener">Validating models: How would success in NeuroAI look like?</a></li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2024/10/BI-196-final.pdf">Read the transcript</a>, provided by <em>The Transmitter.</em></p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1853735/c1e-m1mmsn70qrbo4xk8-6zww84xqc55x-qb2kpq.mp3" length="90966565"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists. 





This is the second conversation I had while teamed up with Gaute Einevoll at a workshop on NeuroAI in Norway. In this episode, Gaute and I are joined by Cristina Savin and Tim Vogels. Cristina shares how her lab uses recurrent neural networks to study learning, while Tim talks about his long-standing research on synaptic plasticity and how AI tools are now helping to explore the vast space of possible plasticity rules.



We touch on how deep learning has changed the landscape, enhancing our research but also creating challenges with the "fashion-driven" nature of science today. We also reflect on how these new tools have changed the way we think about brain function without fundamentally altering the structure of our questions.



Be sure to check out Gaute's Theoretical Neuroscience podcast as well!








Mikkel Lepperød



Cristina Savin



Tim Vogels

Twitter: @TPVogels





Gaute Einevoll

Twitter: @GauteEinevoll



Gaute's Theoretical Neuroscience podcast.





Validating models: How would success in NeuroAI look like?




Read the transcript, provided by The Transmitter.]]>
                </itunes:summary>
                                                                            <itunes:duration>01:19:40</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 195 Ken Harris and Andreas Tolias with Gaute Einevoll and Mikkel Lepperød]]>
                </title>
                <pubDate>Tue, 08 Oct 2024 04:00:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1852793</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-195-ken-harris-and-andreas-tolias-with-gaute-einevoll-and-mikkel-lepperod</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>





<p>This is the first of two less usual episodes. I was recently in Norway at a NeuroAI workshop called <a href="https://www.mn.uio.no/ccse/english/about/news-and-events/news/navigating-the-future-of-neuroai.html" target="_blank" rel="noreferrer noopener">Validating models: How would success in NeuroAI look like?</a> What follows are a few recordings I made with my friend Gaute Einevoll. Gaute has <a href="https://braininspired.co/podcast/148/">been on this podcast before</a>, but more importantly he started his own podcast a while back called <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a>, which you should check out.</p>



<p>Gaute and I introduce the episode, then briefly speak with <a href="https://www.mn.uio.no/ibv/english/people/aca/bjornmik/index.html">Mikkel Lepperød</a>, one of the organizers of the workshop. In this first episode, we're then joined by <a href="https://www.ucl.ac.uk/cortexlab/">Ken Harris</a> and <a href="https://toliaslab.org/">Andreas Tolias</a> to discuss how AI has influenced their research, thoughts about brains and minds, and progress and productivity.</p>



<ul class="wp-block-list">
<li><a href="https://www.mn.uio.no/ccse/english/about/news-and-events/events/2024/neuro-ai-workshop.html">Validating models: How would success in NeuroAI look like?</a></li>



<li><a href="https://lepmik.github.io/">Mikkel Lepperød</a></li>



<li><a href="https://toliaslab.org/">Andreas Tolias</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/AToliasLab">@AToliasLab</a></li>
</ul>
</li>



<li><a href="https://www.ucl.ac.uk/cortexlab/">Ken Harris</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/kennethd_harris">@kennethd_harris</a></li>
</ul>
</li>



<li><a href="https://www.mn.uio.no/fysikk/english/people/aca/geinevol/index.html">Gaute Einevoll</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/GauteEinevoll">@GauteEinevoll</a></li>



<li>Gaute's <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a> podcast.</li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2024/10/BI-195-transcript-final_REVISED.pdf">Read the transcript</a>, provided by <em>The Transmitter</em>.</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.





This is the first of two less usual episodes. I was recently in Norway at a NeuroAI workshop called Validating models: How would success in NeuroAI look like? What follows are a few recordings I made with my friend Gaute Einevoll. Gaute has been on this podcast before, but more importantly he started his own podcast a while back called Theoretical Neuroscience, which you should check out.



Gaute and I introduce the episode, then briefly speak with Mikkel Lepperød, one of the organizers of the workshop. In this first episode, we're then joined by Ken Harris and Andreas Tolias to discuss how AI has influenced their research, thoughts about brains and minds, and progress and productivity.




Validating models: How would success in NeuroAI look like?



Mikkel Lepperød



Andreas Tolias

Twitter: @AToliasLab





Ken Harris

Twitter: @kennethd_harris





Gaute Einevoll

Twitter: @GauteEinevoll



Gaute's Theoretical Neuroscience podcast.






Read the transcript, provided by The Transmitter.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 195 Ken Harris and Andreas Tolias with Gaute Einevoll and Mikkel Lepperød]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists.</p>





<p>This is the first of two less usual episodes. I was recently in Norway at a NeuroAI workshop called <a href="https://www.mn.uio.no/ccse/english/about/news-and-events/news/navigating-the-future-of-neuroai.html" target="_blank" rel="noreferrer noopener">Validating models: How would success in NeuroAI look like?</a> What follows are a few recordings I made with my friend Gaute Einevoll. Gaute has <a href="https://braininspired.co/podcast/148/">been on this podcast before</a>, but more importantly he started his own podcast a while back called <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a>, which you should check out.</p>



<p>Gaute and I introduce the episode, then briefly speak with <a href="https://www.mn.uio.no/ibv/english/people/aca/bjornmik/index.html">Mikkel Lepperød</a>, one of the organizers of the workshop. In this first episode, we're then joined by <a href="https://www.ucl.ac.uk/cortexlab/">Ken Harris</a> and <a href="https://toliaslab.org/">Andreas Tolias</a> to discuss how AI has influenced their research, thoughts about brains and minds, and progress and productivity.</p>



<ul class="wp-block-list">
<li><a href="https://www.mn.uio.no/ccse/english/about/news-and-events/events/2024/neuro-ai-workshop.html">Validating models: How would success in NeuroAI look like?</a></li>



<li><a href="https://lepmik.github.io/">Mikkel Lepperød</a></li>



<li><a href="https://toliaslab.org/">Andreas Tolias</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/AToliasLab">@AToliasLab</a></li>
</ul>
</li>



<li><a href="https://www.ucl.ac.uk/cortexlab/">Ken Harris</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/kennethd_harris">@kennethd_harris</a></li>
</ul>
</li>



<li><a href="https://www.mn.uio.no/fysikk/english/people/aca/geinevol/index.html">Gaute Einevoll</a>
<ul class="wp-block-list">
<li>Twitter: <a href="https://x.com/GauteEinevoll">@GauteEinevoll</a></li>



<li>Gaute's <a href="https://theoreticalneuroscience.no/">Theoretical Neuroscience</a> podcast.</li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2024/10/BI-195-transcript-final_REVISED.pdf">Read the transcript</a>, provided by <em>The Transmitter</em>.</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1852793/c1e-9077snr95vs09z56-wwmm77ppc915-ky96zb.mp3" length="74292660"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists.





This is the first of two less usual episodes. I was recently in Norway at a NeuroAI workshop called Validating models: How would success in NeuroAI look like? What follows are a few recordings I made with my friend Gaute Einevoll. Gaute has been on this podcast before, but more importantly he started his own podcast a while back called Theoretical Neuroscience, which you should check out.



Gaute and I introduce the episode, then briefly speak with Mikkel Lepperød, one of the organizers of the workshop. In this first episode, we're then joined by Ken Harris and Andreas Tolias to discuss how AI has influenced their research, thoughts about brains and minds, and progress and productivity.




Validating models: How would success in NeuroAI look like?



Mikkel Lepperød



Andreas Tolias

Twitter: @AToliasLab





Ken Harris

Twitter: @kennethd_harris





Gaute Einevoll

Twitter: @GauteEinevoll



Gaute's Theoretical Neuroscience podcast.






Read the transcript, provided by The Transmitter.]]>
                </itunes:summary>
                                                                            <itunes:duration>01:17:05</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 194 Vijay Namboodiri & Ali Mohebi: Dopamine Keeps Getting More Interesting]]>
                </title>
                <pubDate>Fri, 27 Sep 2024 04:14:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1843163</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-194-vijay-namboodiri-amp-ali-mohebi-dopamine-keeps-getting-more-interesting</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>




https://youtu.be/lbKEOdbeqHo




<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists. </p>











<p>The Transmitter has provided a transcript for this episode.</p>



<p>Vijay Namboodiri runs the Nam Lab at the University of California San Francisco, and Ali Mohebi is an assistant professor at the University of Wisconsin-Madison. Ali has been on the podcast before a few times, and he's interested in how neuromodulators like dopamine affect our cognition. And it was Ali who pointed me to Vijay, because of some recent work Vijay has done reassessing how dopamine might function differently than what has become the classic story of dopamine's function as it pertains to learning. The classic story is that dopamine is related to reward prediction errors. That is, dopamine is modulated when you expect reward and don't get it, and/or when you don't expect reward but do get it. Vijay calls this a "prospective" account of dopamine function, since it requires an animal to look into the future to expect a reward. Vijay has shown, however, that a retrospective account of dopamine might better explain lots of known behavioral data. This retrospective account links dopamine to how we understand causes and effects in our ongoing behavior. So in this episode, Vijay gives us a history lesson about dopamine, his newer story and why it has caused a bit of controversy, and how all of this came to be.</p>



<p>I happened to be looking at the Transmitter the other day, after I recorded this episode, and lo and behold, there was an article titled <a href="https://www.thetransmitter.org/dopamine/reconstructing-dopamines-link-to-reward/">Reconstructing dopamine’s link to reward</a>. Vijay is featured in the article among a handful of other thoughtful researchers who share their work and ideas about this very topic. Vijay wrote his own piece as well: <a href="https://www.thetransmitter.org/dopamine/dopamine-and-the-need-for-alternative-theories/">Dopamine and the need for alternative theories</a>. So check out those articles for more views on how the field is reconsidering how dopamine works.</p>



<ul class="wp-block-list">
<li><a href="https://www.namboodirilab.org/research">Nam Lab</a>.</li>



<li><a href="https://lab.mohebial.com/">Mohebi &amp; Associates (Ali's Lab)</a>.</li>



<li>Twitter:
<ul class="wp-block-list">
<li><a href="https://x.com/vijay_mkn">@vijay_mkn</a></li>



<li><a href="https://twitter.com/mohebial">@mohebial</a></li>
</ul>
</li>



<li>Transmitter
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/dopamine/dopamine-and-the-need-for-alternative-theories/">Dopamine and the need for alternative theories</a>.</li>



<li><a href="https://www.thetransmitter.org/dopamine/reconstructing-dopamines-link-to-reward/">Reconstructing dopamine’s link to reward</a>.</li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.science.org/stoken/author-tokens/ST-895/full">Mesolimbic dopamine release conveys causal associations</a>.</li>



<li><a href="https://www.science.org/doi/full/10.1126/sciadv.adn4203">Mesostriatal dopamine is sensitive to changes in specific cue-reward contingencies</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/2021.02.07.430001v1.abstract">What is the state space of the world for real animals?</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0896627321007078">The learning of prospective and retrospective cog...</a></li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.








https://youtu.be/lbKEOdbeqHo




The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists. 











The Transmitter has provided a transcript for this episode.



Vijay Namboodiri runs the Nam Lab at the University of California San Francisco, and Ali Mohebi is an assistant professor at the University of Wisconsin-Madison. Ali has been on the podcast before a few times, and he's interested in how neuromodulators like dopamine affect our cognition. And it was Ali who pointed me to Vijay, because of some recent work Vijay has done reassessing how dopamine might function differently than what has become the classic story of dopamine's function as it pertains to learning. The classic story is that dopamine is related to reward prediction errors. That is, dopamine is modulated when you expect reward and don't get it, and/or when you don't expect reward but do get it. Vijay calls this a "prospective" account of dopamine function, since it requires an animal to look into the future to expect a reward. Vijay has shown, however, that a retrospective account of dopamine might better explain lots of known behavioral data. This retrospective account links dopamine to how we understand causes and effects in our ongoing behavior. So in this episode, Vijay gives us a history lesson about dopamine, his newer story and why it has caused a bit of controversy, and how all of this came to be.



I happened to be looking at the Transmitter the other day, after I recorded this episode, and lo and behold, there was an article titled Reconstructing dopamine’s link to reward. Vijay is featured in the article among a handful of other thoughtful researchers who share their work and ideas about this very topic. Vijay wrote his own piece as well: Dopamine and the need for alternative theories. So check out those articles for more views on how the field is reconsidering how dopamine works.




Nam Lab.



Mohebi & Associates (Ali's Lab).



Twitter:

@vijay_mkn



@mohebial





Transmitter

Dopamine and the need for alternative theories.



Reconstructing dopamine’s link to reward.





Related papers

Mesolimbic dopamine release conveys causal associations.



Mesostriatal dopamine is sensitive to changes in specific cue-reward contingencies.



What is the state space of the world for real animals?



The learning of prospective and retrospective cog...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 194 Vijay Namboodiri & Ali Mohebi: Dopamine Keeps Getting More Interesting]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>




https://youtu.be/lbKEOdbeqHo




<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists. </p>











<p>The Transmitter has provided a transcript for this episode.</p>



<p>Vijay Namboodiri runs the Nam Lab at the University of California San Francisco, and Ali Mohebi is an assistant professor at the University of Wisconsin-Madison. Ali has been on the podcast before a few times, and he's interested in how neuromodulators like dopamine affect our cognition. And it was Ali who pointed me to Vijay, because of some recent work Vijay has done reassessing how dopamine might function differently than what has become the classic story of dopamine's function as it pertains to learning. The classic story is that dopamine is related to reward prediction errors. That is, dopamine is modulated when you expect reward and don't get it, and/or when you don't expect reward but do get it. Vijay calls this a "prospective" account of dopamine function, since it requires an animal to look into the future to expect a reward. Vijay has shown, however, that a retrospective account of dopamine might better explain lots of known behavioral data. This retrospective account links dopamine to how we understand causes and effects in our ongoing behavior. So in this episode, Vijay gives us a history lesson about dopamine, his newer story and why it has caused a bit of controversy, and how all of this came to be.</p>



<p>I happened to be looking at the Transmitter the other day, after I recorded this episode, and lo and behold, there was an article titled <a href="https://www.thetransmitter.org/dopamine/reconstructing-dopamines-link-to-reward/">Reconstructing dopamine’s link to reward</a>. Vijay is featured in the article among a handful of other thoughtful researchers who share their work and ideas about this very topic. Vijay wrote his own piece as well: <a href="https://www.thetransmitter.org/dopamine/dopamine-and-the-need-for-alternative-theories/">Dopamine and the need for alternative theories</a>. So check out those articles for more views on how the field is reconsidering how dopamine works.</p>



<ul class="wp-block-list">
<li><a href="https://www.namboodirilab.org/research">Nam Lab</a>.</li>



<li><a href="https://lab.mohebial.com/">Mohebi &amp; Associates (Ali's Lab)</a>.</li>



<li>Twitter:
<ul class="wp-block-list">
<li><a href="https://x.com/vijay_mkn">@vijay_mkn</a></li>



<li><a href="https://twitter.com/mohebial">@mohebial</a></li>
</ul>
</li>



<li>Transmitter
<ul class="wp-block-list">
<li><a href="https://www.thetransmitter.org/dopamine/dopamine-and-the-need-for-alternative-theories/">Dopamine and the need for alternative theories</a>.</li>



<li><a href="https://www.thetransmitter.org/dopamine/reconstructing-dopamines-link-to-reward/">Reconstructing dopamine’s link to reward</a>.</li>
</ul>
</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://www.science.org/stoken/author-tokens/ST-895/full">Mesolimbic dopamine release conveys causal associations</a>.</li>



<li><a href="https://www.science.org/doi/full/10.1126/sciadv.adn4203">Mesostriatal dopamine is sensitive to changes in specific cue-reward contingencies</a>.</li>



<li><a href="https://www.biorxiv.org/content/10.1101/2021.02.07.430001v1.abstract">What is the state space of the world for real animals?</a></li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0896627321007078">The learning of prospective and retrospective cognitive maps within neural circuits</a></li>
</ul>
</li>



<li>Further reading
<ul class="wp-block-list">
<li>(Ali's paper): <a href="https://www.nature.com/articles/s41593-023-01566-3">Dopamine transients follow a striatal gradient of reward time horizons.</a></li>



<li>Ali listed a bunch of work on local modulation of DA release:
<ul class="wp-block-list">
<li><a href="https://www.frontiersin.org/journals/behavioral-neuroscience/articles/10.3389/fnbeh.2014.00188/full">Local control of striatal dopamine release</a>.</li>



<li><a href="https://pubmed.ncbi.nlm.nih.gov/35931070/">Synaptic-like axo-axonal transmission from striatal cholinergic interneurons onto dopaminergic fibers</a>.</li>



<li><a href="https://pubmed.ncbi.nlm.nih.gov/33837376/">Spatial and temporal scales of dopamine transmission</a>.</li>



<li><a href="https://pubmed.ncbi.nlm.nih.gov/27141430/">Striatal dopamine neurotransmission: Regulation of release and uptake</a>.</li>



<li><a href="https://pubmed.ncbi.nlm.nih.gov/22794260/">Striatal Dopamine Release Is Triggered by Synchronized Activity in Cholinergic Interneurons</a>.</li>



<li><a href="https://www.science.org/doi/10.1126/science.abn0532">An action potential initiation mechanism in distal axons for the control of dopamine release</a>.</li>
</ul>
</li>
</ul>
</li>
</ul>



<p><a href="https://www.thetransmitter.org/wp-content/uploads/2024/09/BI-194-transcript-final.pdf">Read the transcript</a>, produced by <a href="http://thetransmitter.org/">The Transmitter</a>.</p>



<p>0:00 - Intro
3:42 - Dopamine: the history of theories
32:54 - Importance of learning and behavior studies
39:12 - Dopamine and causality
1:06:45 - Controversy over Vijay's findings</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1843163/c1e-z9xxcm1304u1xx0n-jp4wpvrrs8dd-d6bmtu.mp3" length="94118783"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.








https://youtu.be/lbKEOdbeqHo




The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists. 











The Transmitter has provided a transcript for this episode.



Vijay Namboodiri runs the Nam Lab at the University of California San Francisco, and Ali Mohebi is an assistant professor at the University of Wisconsin-Madison. Ali has been on the podcast before a few times, and he's interested in how neuromodulators like dopamine affect our cognition. And it was Ali who pointed me to Vijay, because of some recent work Vijay has done reassessing how dopamine might function differently than what has become the classic story of dopamine's function as it pertains to learning. The classic story is that dopamine is related to reward prediction errors. That is, dopamine is modulated when you expect reward and don't get it, and/or when you don't expect reward but do get it. Vijay calls this a "prospective" account of dopamine function, since it requires an animal to look into the future to expect a reward. Vijay has shown, however, that a retrospective account of dopamine might better explain lots of known behavioral data. This retrospective account links dopamine to how we understand causes and effects in our ongoing behavior. So in this episode, Vijay gives us a history lesson about dopamine, his newer story and why it has caused a bit of controversy, and how all of this came to be.



I happened to be looking at the Transmitter the other day, after I recorded this episode, and lo and behold, there was an article titled Reconstructing dopamine’s link to reward. Vijay is featured in the article among a handful of other thoughtful researchers who share their work and ideas about this very topic. Vijay wrote his own piece as well: Dopamine and the need for alternative theories. So check out those articles for more views on how the field is reconsidering how dopamine works.




Nam Lab.



Mohebi & Associates (Ali's Lab).



Twitter:

@vijay_mkn



@mohebial





Transmitter

Dopamine and the need for alternative theories.



Reconstructing dopamine’s link to reward.





Related papers

Mesolimbic dopamine release conveys causal associations.



Mesostriatal dopamine is sensitive to changes in specific cue-reward contingencies.



What is the state space of the world for real animals?



The learning of prospective and retrospective cog...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:37:21</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 193 Kim Stachenfeld: Enhancing Neuroscience and AI]]>
                </title>
                <pubDate>Wed, 11 Sep 2024 10:36:38 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1832575</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-193-kim-stachenfeld-enhancing-neuroscience-and-ai-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists. </p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story:  <strong><a href="https://www.thetransmitter.org/cognitive-neuroscience/monkeys-build-mental-maps-to-navigate-new-tasks/">Monkeys build mental maps to navigate new tasks</a> </strong></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>To explore more neuroscience news and perspectives, <strong>visit <a href="http://thetransmitter.org/">thetransmitter.org</a>.</strong></p>







<p>Kim Stachenfeld embodies the original core focus of this podcast, the exploration of the intersection between neuroscience and AI, now commonly known as Neuro-AI. That's because she walks both lines. Kim is a Senior Research Scientist at Google DeepMind, the AI company that sprang from neuroscience principles, and also does research at the Center for Theoretical Neuroscience at Columbia University. She's been using her expertise in modeling, and reinforcement learning, and cognitive maps, for example, to help understand brains and to help improve AI. I've been wanting to have her on for a long time to get her broad perspective on AI and neuroscience.</p>



<p>We discuss the relative roles of industry and academia in pursuing various objectives related to understanding and building cognitive entities.</p>



<p>She's studied the hippocampus in her research on reinforcement learning and cognitive maps, so we discuss what the heck the hippocampus does since it seems to implicated in so many functions, and how she thinks of reinforcement learning these days.</p>



<p>Most recently Kim at Deepmind has focused on more practical engineering questions, using deep learning models to predict things like chaotic turbulent flows, and even to help design things like bridges and airplanes. And we don't get into the specifics of that work, but, given that I just spoke with Damian Kelty-Stephen, who thinks of brains partially as turbulent cascades, Kim and I discuss how her work on modeling turbulence has shaped her thoughts about brains.</p>



<ul class="wp-block-list">
<li><a href="https://neurokim.com/">Kim's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/neuro_kim?ref_src=twsrc%5Etfw%7Ctwcamp%5Eembeddedtimeline%7Ctwterm%5Escreen-name%3Aneuro_kim%7Ctwcon%5Es2">@neuro_kim</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2001.08361">Scaling Laws for Neural Language Models</a>.</li>



<li><a href="https://arxiv.org/abs/2206.07682">Emergent Abilities of Large Language Models</a>.</li>



<li>Learned simulators:
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2112.15275">Learned coarse models for efficient turbulence simulation</a>.</li>



<li><a href="https://arxiv.org/pdf/2202.00728">Physical design using differentiable learned simulators</a>.</li>
</ul>
</li>
</ul>
</li>
</ul>



<p>Check out <a href="https://www.thetransmitter.org/wp-content/uploads/2024/09/kim-stachenfeld-transcript-final.pdf" target="_blank" rel="noreferrer noopener">the transcript</a>, provided by The Transmitter.</p>



<p>0:00 - Intro
4:31 - Deepmind's original and current vision
9:53 - AI as tools and models
12:53 - Has AI hindered neuroscience?
17:05 - Deepmind vs academic work balance...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists. 



Read more about our partnership.



Check out this story:  Monkeys build mental maps to navigate new tasks 



Sign up for “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.







Kim Stachenfeld embodies the original core focus of this podcast, the exploration of the intersection between neuroscience and AI, now commonly known as Neuro-AI. That's because she walks both lines. Kim is a Senior Research Scientist at Google DeepMind, the AI company that sprang from neuroscience principles, and also does research at the Center for Theoretical Neuroscience at Columbia University. She's been using her expertise in modeling, and reinforcement learning, and cognitive maps, for example, to help understand brains and to help improve AI. I've been wanting to have her on for a long time to get her broad perspective on AI and neuroscience.



We discuss the relative roles of industry and academia in pursuing various objectives related to understanding and building cognitive entities.



She's studied the hippocampus in her research on reinforcement learning and cognitive maps, so we discuss what the heck the hippocampus does since it seems to be implicated in so many functions, and how she thinks of reinforcement learning these days.



Most recently Kim at Deepmind has focused on more practical engineering questions, using deep learning models to predict things like chaotic turbulent flows, and even to help design things like bridges and airplanes. And we don't get into the specifics of that work, but, given that I just spoke with Damian Kelty-Stephen, who thinks of brains partially as turbulent cascades, Kim and I discuss how her work on modeling turbulence has shaped her thoughts about brains.




Kim's website.



Twitter: @neuro_kim.



Related papers

Scaling Laws for Neural Language Models.



Emergent Abilities of Large Language Models.



Learned simulators:

Learned coarse models for efficient turbulence simulation.



Physical design using differentiable learned simulators.








Check out the transcript, provided by The Transmitter.



0:00 - Intro
4:31 - Deepmind's original and current vision
9:53 - AI as tools and models
12:53 - Has AI hindered neuroscience?
17:05 - Deepmind vs academic work balance...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 193 Kim Stachenfeld: Enhancing Neuroscience and AI]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p><em>The Transmitter </em>is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit <a href="http://thetransmitter.org/">thetransmitter.org</a> to explore the latest neuroscience news and perspectives, written by journalists and scientists. </p>



<p>Read more about <a href="https://www.thetransmitter.org/partners/">our partnership</a>.</p>



<p>Check out this story:  <strong><a href="https://www.thetransmitter.org/cognitive-neuroscience/monkeys-build-mental-maps-to-navigate-new-tasks/">Monkeys build mental maps to navigate new tasks</a> </strong></p>



<p>Sign up for <a href="https://www.thetransmitter.org/newsletters/">“Brain Inspired” email alerts</a> to be notified every time a new “Brain Inspired” episode is released.</p>



<p>To explore more neuroscience news and perspectives, <strong>visit <a href="http://thetransmitter.org/">thetransmitter.org</a>.</strong></p>







<p>Kim Stachenfeld embodies the original core focus of this podcast, the exploration of the intersection between neuroscience and AI, now commonly known as Neuro-AI. That's because she walks both lines. Kim is a Senior Research Scientist at Google DeepMind, the AI company that sprang from neuroscience principles, and also does research at the Center for Theoretical Neuroscience at Columbia University. She's been using her expertise in modeling, and reinforcement learning, and cognitive maps, for example, to help understand brains and to help improve AI. I've been wanting to have her on for a long time to get her broad perspective on AI and neuroscience.</p>



<p>We discuss the relative roles of industry and academia in pursuing various objectives related to understanding and building cognitive entities.</p>



<p>She's studied the hippocampus in her research on reinforcement learning and cognitive maps, so we discuss what the heck the hippocampus does since it seems to be implicated in so many functions, and how she thinks of reinforcement learning these days.</p>



<p>Most recently Kim at Deepmind has focused on more practical engineering questions, using deep learning models to predict things like chaotic turbulent flows, and even to help design things like bridges and airplanes. And we don't get into the specifics of that work, but, given that I just spoke with Damian Kelty-Stephen, who thinks of brains partially as turbulent cascades, Kim and I discuss how her work on modeling turbulence has shaped her thoughts about brains.</p>



<ul class="wp-block-list">
<li><a href="https://neurokim.com/">Kim's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/neuro_kim?ref_src=twsrc%5Etfw%7Ctwcamp%5Eembeddedtimeline%7Ctwterm%5Escreen-name%3Aneuro_kim%7Ctwcon%5Es2">@neuro_kim</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2001.08361">Scaling Laws for Neural Language Models</a>.</li>



<li><a href="https://arxiv.org/abs/2206.07682">Emergent Abilities of Large Language Models</a>.</li>



<li>Learned simulators:
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2112.15275">Learned coarse models for efficient turbulence simulation</a>.</li>



<li><a href="https://arxiv.org/pdf/2202.00728">Physical design using differentiable learned simulators</a>.</li>
</ul>
</li>
</ul>
</li>
</ul>



<p>Check out <a href="https://www.thetransmitter.org/wp-content/uploads/2024/09/kim-stachenfeld-transcript-final.pdf" target="_blank" rel="noreferrer noopener">the transcript</a>, provided by The Transmitter.</p>



<p>0:00 - Intro
4:31 - Deepmind's original and current vision
9:53 - AI as tools and models
12:53 - Has AI hindered neuroscience?
17:05 - Deepmind vs academic work balance
20:47 - Is industry better suited to understand brains?
24:42 - Trajectory of Deepmind
27:41 - Kim's trajectory
33:35 - Is the brain a ML entity?
36:12 - Hippocampus
44:12 - Reinforcement learning
51:32 - What does neuroscience need more and less of?
1:02:53 - Neuroscience in a weird place?
1:06:41 - How Kim's questions have changed
1:16:31 - Intelligence and LLMs
1:25:34 - Challenges</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1832575/c1e-d5wwa64nnouzpdm9-v61gwkvkbr25-wbl5ps.mp3" length="90412357"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









The Transmitter is an online publication that aims to deliver useful information, insights and tools to build bridges across neuroscience and advance research. Visit thetransmitter.org to explore the latest neuroscience news and perspectives, written by journalists and scientists. 



Read more about our partnership.



Check out this story:  Monkeys build mental maps to navigate new tasks 



Sign up for “Brain Inspired” email alerts to be notified every time a new “Brain Inspired” episode is released.



To explore more neuroscience news and perspectives, visit thetransmitter.org.







Kim Stachenfeld embodies the original core focus of this podcast, the exploration of the intersection between neuroscience and AI, now commonly known as Neuro-AI. That's because she walks both lines. Kim is a Senior Research Scientist at Google DeepMind, the AI company that sprang from neuroscience principles, and also does research at the Center for Theoretical Neuroscience at Columbia University. She's been using her expertise in modeling, and reinforcement learning, and cognitive maps, for example, to help understand brains and to help improve AI. I've been wanting to have her on for a long time to get her broad perspective on AI and neuroscience.



We discuss the relative roles of industry and academia in pursuing various objectives related to understanding and building cognitive entities.



She's studied the hippocampus in her research on reinforcement learning and cognitive maps, so we discuss what the heck the hippocampus does since it seems to be implicated in so many functions, and how she thinks of reinforcement learning these days.



Most recently Kim at Deepmind has focused on more practical engineering questions, using deep learning models to predict things like chaotic turbulent flows, and even to help design things like bridges and airplanes. And we don't get into the specifics of that work, but, given that I just spoke with Damian Kelty-Stephen, who thinks of brains partially as turbulent cascades, Kim and I discuss how her work on modeling turbulence has shaped her thoughts about brains.




Kim's website.



Twitter: @neuro_kim.



Related papers

Scaling Laws for Neural Language Models.



Emergent Abilities of Large Language Models.



Learned simulators:

Learned coarse models for efficient turbulence simulation.



Physical design using differentiable learned simulators.








Check out the transcript, provided by The Transmitter.



0:00 - Intro
4:31 - Deepmind's original and current vision
9:53 - AI as tools and models
12:53 - Has AI hindered neuroscience?
17:05 - Deepmind vs academic work balance...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:32:41</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 192 Àlex Gómez-Marín: The Edges of Consciousness]]>
                </title>
                <pubDate>Wed, 28 Aug 2024 23:36:02 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1825420</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-192-alex-gomez-marin-the-edges-of-consciousness</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Àlex Gómez-Marín heads <a href="https://behavior-of-organisms.org/">The Behavior of Organisms Laboratory</a> at the Institute of Neuroscience in Alicante, Spain. He's one of those theoretical physicists turned neuroscientists, and he has studied a wide range of topics over his career. Most recently, he has become interested in what he calls the "edges of consciousness", which encompasses the many trying to explain what may be happening when we have experiences outside our normal everyday experiences. For example, when we are under the influence of hallucinogens, when we have near-death experiences (as Alex has), paranormal experiences, and so on.</p>



<p>So we discuss what led up to his interests in these edges of consciousness, how he now thinks about consciousness and doing science in general, how important it is to make room for all possible explanations of phenomena, and to leave our metaphysics open all the while.</p>



<ul class="wp-block-list">
<li>Alex's website: <a href="https://behavior-of-organisms.org/">The Behavior of Organisms Laboratory</a>.</li>



<li>Twitter: <a href="https://twitter.com/behaviOrganisms">@behaviOrganisms</a>.</li>



<li>Previous episodes:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/168/">BI 168 Frauke Sandig and Eric Black w Alex Gomez-Marin: AWARE: Glimpses of Consciousness</a>.</li>



<li><a href="https://braininspired.co/podcast/136/">BI 136 Michel Bitbol and Alex Gomez-Marin: Phenomenology</a>.</li>
</ul>
</li>



<li>Related:
<ul class="wp-block-list">
<li><a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10646881/">The Consciousness of Neuroscience</a>.</li>



<li><a href="https://iai.tv/articles/seeing-the-consciousness-forest-for-the-trees-auid-2901?_auid=2020">Seeing the consciousness forest for the trees</a>.</li>



<li><a href="https://www.nature.com/articles/d41586-024-02603-2.pdf">The stairway to transhumanist heaven</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:13 - Evolving viewpoints
10:05 - Near-death experience
18:30 - Mechanistic neuroscience vs. the rest
22:46 - Are you doing science?
33:46 - Where is my mind?
44:55 - Productive vs. permissive brain
59:30 - Panpsychism
1:07:58 - Materialism
1:10:38 - How to choose what to do
1:16:54 - Fruit flies
1:19:52 - AI and the Singularity</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Àlex Gómez-Marín heads The Behavior of Organisms Laboratory at the Institute of Neuroscience in Alicante, Spain. He's one of those theoretical physicists turned neuroscientists, and he has studied a wide range of topics over his career. Most recently, he has become interested in what he calls the "edges of consciousness", which encompasses the many trying to explain what may be happening when we have experiences outside our normal everyday experiences. For example, when we are under the influence of hallucinogens, when we have near-death experiences (as Alex has), paranormal experiences, and so on.



So we discuss what led up to his interests in these edges of consciousness, how he now thinks about consciousness and doing science in general, how important it is to make room for all possible explanations of phenomena, and to leave our metaphysics open all the while.




Alex's website: The Behavior of Organisms Laboratory.



Twitter: @behaviOrganisms.



Previous episodes:

BI 168 Frauke Sandig and Eric Black w Alex Gomez-Marin: AWARE: Glimpses of Consciousness.



BI 136 Michel Bitbol and Alex Gomez-Marin: Phenomenology.





Related:

The Consciousness of Neuroscience.



Seeing the consciousness forest for the trees.



The stairway to transhumanist heaven.






0:00 - Intro
4:13 - Evolving viewpoints
10:05 - Near-death experience
18:30 - Mechanistic neuroscience vs. the rest
22:46 - Are you doing science?
33:46 - Where is my mind?
44:55 - Productive vs. permissive brain
59:30 - Panpsychism
1:07:58 - Materialism
1:10:38 - How to choose what to do
1:16:54 - Fruit flies
1:19:52 - AI and the Singularity]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 192 Àlex Gómez-Marín: The Edges of Consciousness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Àlex Gómez-Marín heads <a href="https://behavior-of-organisms.org/">The Behavior of Organisms Laboratory</a> at the Institute of Neuroscience in Alicante, Spain. He's one of those theoretical physicists turned neuroscientists, and he has studied a wide range of topics over his career. Most recently, he has become interested in what he calls the "edges of consciousness", which encompasses the many trying to explain what may be happening when we have experiences outside our normal everyday experiences. For example, when we are under the influence of hallucinogens, when we have near-death experiences (as Alex has), paranormal experiences, and so on.</p>



<p>So we discuss what led up to his interests in these edges of consciousness, how he now thinks about consciousness and doing science in general, how important it is to make room for all possible explanations of phenomena, and to leave our metaphysics open all the while.</p>



<ul class="wp-block-list">
<li>Alex's website: <a href="https://behavior-of-organisms.org/">The Behavior of Organisms Laboratory</a>.</li>



<li>Twitter: <a href="https://twitter.com/behaviOrganisms">@behaviOrganisms</a>.</li>



<li>Previous episodes:
<ul class="wp-block-list">
<li><a href="https://braininspired.co/podcast/168/">BI 168 Frauke Sandig and Eric Black w Alex Gomez-Marin: AWARE: Glimpses of Consciousness</a>.</li>



<li><a href="https://braininspired.co/podcast/136/">BI 136 Michel Bitbol and Alex Gomez-Marin: Phenomenology</a>.</li>
</ul>
</li>



<li>Related:
<ul class="wp-block-list">
<li><a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10646881/">The Consciousness of Neuroscience</a>.</li>



<li><a href="https://iai.tv/articles/seeing-the-consciousness-forest-for-the-trees-auid-2901?_auid=2020">Seeing the consciousness forest for the trees</a>.</li>



<li><a href="https://www.nature.com/articles/d41586-024-02603-2.pdf">The stairway to transhumanist heaven</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:13 - Evolving viewpoints
10:05 - Near-death experience
18:30 - Mechanistic neuroscience vs. the rest
22:46 - Are you doing science?
33:46 - Where is my mind?
44:55 - Productive vs. permissive brain
59:30 - Panpsychism
1:07:58 - Materialism
1:10:38 - How to choose what to do
1:16:54 - Fruit flies
1:19:52 - AI and the Singularity</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1825420/c1e-8955c9nrqxbp0x4k-kp28xr4zcj8-lgh6e4.mp3" length="88171545"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Àlex Gómez-Marín heads The Behavior of Organisms Laboratory at the Institute of Neuroscience in Alicante, Spain. He's one of those theoretical physicists turned neuroscientists, and he has studied a wide range of topics over his career. Most recently, he has become interested in what he calls the "edges of consciousness", which encompasses the many trying to explain what may be happening when we have experiences outside our normal everyday experiences. For example, when we are under the influence of hallucinogens, when we have near-death experiences (as Alex has), paranormal experiences, and so on.



So we discuss what led up to his interests in these edges of consciousness, how he now thinks about consciousness and doing science in general, how important it is to make room for all possible explanations of phenomena, and to leave our metaphysics open all the while.




Alex's website: The Behavior of Organisms Laboratory.



Twitter: @behaviOrganisms.



Previous episodes:

BI 168 Frauke Sandig and Eric Black w Alex Gomez-Marin: AWARE: Glimpses of Consciousness.



BI 136 Michel Bitbol and Alex Gomez-Marin: Phenomenology.





Related:

The Consciousness of Neuroscience.



Seeing the consciousness forest for the trees.



The stairway to transhumanist heaven.






0:00 - Intro
4:13 - Evolving viewpoints
10:05 - Near-death experience
18:30 - Mechanistic neuroscience vs. the rest
22:46 - Are you doing science?
33:46 - Where is my mind?
44:55 - Productive vs. permissive brain
59:30 - Panpsychism
1:07:58 - Materialism
1:10:38 - How to choose what to do
1:16:54 - Fruit flies
1:19:52 - AI and the Singularity]]>
                </itunes:summary>
                                                                            <itunes:duration>01:30:34</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 191 Damian Kelty-Stephen: Fractal Turbulent Cascading Intelligence]]>
                </title>
                <pubDate>Thu, 15 Aug 2024 13:31:11 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1811746</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-191-damian-kelty-stephen-fractal-turbulent-cascading-intelligence-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Damian Kelty-Stephen is an experimental psychologist at State University of New York at New Paltz. Last episode with Luis Favela, we discussed many of the ideas from ecological psychology, and how Louie is trying to reconcile those principles with those of neuroscience. In this episode, Damian and I in some ways continue that discussion, because Damian is also interested in unifying principles of ecological psychology and neuroscience. However, he is approaching it from a different perspective than Louie. What drew me originally to Damian was a paper he put together with a bunch of authors offering their own alternatives to the computer metaphor of the brain, which has come to dominate neuroscience. And we discuss that some, and I'll link to the paper in the show notes. But mostly we discuss Damian's work studying the fractal structure of our behaviors, connecting that structure across scales, and linking it to how our brains and bodies interact to produce our behaviors. Along the way, we talk about his interests in cascades dynamics and turbulence to also explain our intelligence and behaviors. So, I hope you enjoy this alternative slice into thinking about how we think and move in our bodies and in the world.</p>



<ul class="wp-block-list">
<li><a href="https://sites.google.com/site/foovian/">Damian's website</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2206.04603">In search for an alternative to the computer metaphor of the mind and brain</a>.</li>



<li><a href="https://arxiv.org/html/2401.05105v1">Multifractal emergent processes: Multiplicative interactions override nonlinear component properties</a>.</li>
</ul>
</li>
</ul>





<p>0:00 - Intro
2:34 - Damian's background
9:02 - Brains
12:56 - Do neuroscientists have it all wrong?
16:56 - Fractals everywhere
28:01 - Fractality, causality, and cascades
32:01 - Cascade instability as a metaphor for the brain
40:43 - Damian's worldview
46:09 - What is AI missing?
54:26 - Turbulence
1:01:02 - Intelligence without fractals? Multifractality
1:10:28 - Ergodicity
1:19:16 - Fractality, intelligence, life
1:23:24 - What's exciting, changing viewpoints</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Damian Kelty-Stephen is an experimental psychologist at State University of New York at New Paltz. Last episode with Luis Favela, we discussed many of the ideas from ecological psychology, and how Louie is trying to reconcile those principles with those of neuroscience. In this episode, Damian and I in some ways continue that discussion, because Damian is also interested in unifying principles of ecological psychology and neuroscience. However, he is approaching it from a different perspective than Louie. What drew me originally to Damian was a paper he put together with a bunch of authors offering their own alternatives to the computer metaphor of the brain, which has come to dominate neuroscience. And we discuss that some, and I'll link to the paper in the show notes. But mostly we discuss Damian's work studying the fractal structure of our behaviors, connecting that structure across scales, and linking it to how our brains and bodies interact to produce our behaviors. Along the way, we talk about his interests in cascades dynamics and turbulence to also explain our intelligence and behaviors. So, I hope you enjoy this alternative slice into thinking about how we think and move in our bodies and in the world.




Damian's website.



Related papers

In search for an alternative to the computer metaphor of the mind and brain.



Multifractal emergent processes: Multiplicative interactions override nonlinear component properties.








0:00 - Intro
2:34 - Damian's background
9:02 - Brains
12:56 - Do neuroscientists have it all wrong?
16:56 - Fractals everywhere
28:01 - Fractality, causality, and cascades
32:01 - Cascade instability as a metaphor for the brain
40:43 - Damian's worldview
46:09 - What is AI missing?
54:26 - Turbulence
1:01:02 - Intelligence without fractals? Multifractality
1:10:28 - Ergodicity
1:19:16 - Fractality, intelligence, life
1:23:24 - What's exciting, changing viewpoints]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 191 Damian Kelty-Stephen: Fractal Turbulent Cascading Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Damian Kelty-Stephen is an experimental psychologist at State University of New York at New Paltz. Last episode with Luis Favela, we discussed many of the ideas from ecological psychology, and how Louie is trying to reconcile those principles with those of neuroscience. In this episode, Damian and I in some ways continue that discussion, because Damian is also interested in unifying principles of ecological psychology and neuroscience. However, he is approaching it from a different perspective than Louie. What drew me originally to Damian was a paper he put together with a bunch of authors offering their own alternatives to the computer metaphor of the brain, which has come to dominate neuroscience. And we discuss that some, and I'll link to the paper in the show notes. But mostly we discuss Damian's work studying the fractal structure of our behaviors, connecting that structure across scales, and linking it to how our brains and bodies interact to produce our behaviors. Along the way, we talk about his interests in cascades dynamics and turbulence to also explain our intelligence and behaviors. So, I hope you enjoy this alternative slice into thinking about how we think and move in our bodies and in the world.</p>



<ul class="wp-block-list">
<li><a href="https://sites.google.com/site/foovian/">Damian's website</a>.</li>



<li>Related papers
<ul class="wp-block-list">
<li><a href="https://arxiv.org/abs/2206.04603">In search for an alternative to the computer metaphor of the mind and brain</a>.</li>



<li><a href="https://arxiv.org/html/2401.05105v1">Multifractal emergent processes: Multiplicative interactions override nonlinear component properties</a>.</li>
</ul>
</li>
</ul>





<p>0:00 - Intro
2:34 - Damian's background
9:02 - Brains
12:56 - Do neuroscientists have it all wrong?
16:56 - Fractals everywhere
28:01 - Fractality, causality, and cascades
32:01 - Cascade instability as a metaphor for the brain
40:43 - Damian's worldview
46:09 - What is AI missing?
54:26 - Turbulence
1:01:02 - Intelligence without fractals? Multifractality
1:10:28 - Ergodicity
1:19:16 - Fractality, intelligence, life
1:23:24 - What's exciting, changing viewpoints</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1811746/c1e-gkooh3pg70czorg5-xxvrjrg9urpj-vwmmzs.mp3" length="85587027"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Damian Kelty-Stephen is an experimental psychologist at State University of New York at New Paltz. Last episode with Luis Favela, we discussed many of the ideas from ecological psychology, and how Louie is trying to reconcile those principles with those of neuroscience. In this episode, Damian and I in some ways continue that discussion, because Damian is also interested in unifying principles of ecological psychology and neuroscience. However, he is approaching it from a different perspective than Louie. What drew me originally to Damian was a paper he put together with a bunch of authors offering their own alternatives to the computer metaphor of the brain, which has come to dominate neuroscience. And we discuss that some, and I'll link to the paper in the show notes. But mostly we discuss Damian's work studying the fractal structure of our behaviors, connecting that structure across scales, and linking it to how our brains and bodies interact to produce our behaviors. Along the way, we talk about his interests in cascades dynamics and turbulence to also explain our intelligence and behaviors. So, I hope you enjoy this alternative slice into thinking about how we think and move in our bodies and in the world.




Damian's website.



Related papers

In search for an alternative to the computer metaphor of the mind and brain.



Multifractal emergent processes: Multiplicative interactions override nonlinear component properties.








0:00 - Intro
2:34 - Damian's background
9:02 - Brains
12:56 - Do neuroscientists have it all wrong?
16:56 - Fractals everywhere
28:01 - Fractality, causality, and cascades
32:01 - Cascade instability as a metaphor for the brain
40:43 - Damian's worldview
46:09 - What is AI missing?
54:26 - Turbulence
1:01:02 - Intelligence without fractals? Multifractality
1:10:28 - Ergodicity
1:19:16 - Fractality, intelligence, life
1:23:24 - What's exciting, changing viewpoints]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:51</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 190 Luis Favela: The Ecological Brain]]>
                </title>
                <pubDate>Wed, 31 Jul 2024 14:27:44 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1795965</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-190-luis-favela-the-ecological-brain-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Luis Favela is an Associate Professor at Indiana University Bloomington. He is part philosopher, part cognitive scientist, part many things, and on this episode we discuss his new book, <a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a>.</p>





<p>In the book, Louie presents his NeuroEcological Nexus Theory, or NExT, which, as the subtitle says, proposes a way forward to tie together our brains, our bodies, and the environment; namely it has a lot to do with the complexity sciences and manifolds, which we discuss. But the book doesn't just present his theory. Among other things, it presents a rich historical look into why ecological psychology and neuroscience haven't been exactly friendly over the years, in terms of how to explain our behaviors, the role of brains in those explanations, how to think about what minds are, and so on. And it suggests how the two fields can get over their differences and be friends moving forward. And I'll just say, it's written in a very accessible manner, gently guiding the reader through many of the core concepts and science that have shaped ecological psychology and neuroscience, and for that reason alone I highly recommend it.</p>





<p>Ok, so we discuss a bunch of topics in the book, how Louie thinks, and Louie gives us some great background and historical lessons along the way.</p>



<ul class="wp-block-list">
<li><a href="https://luishfavela.wixsite.com/luishfavela">Luis' website</a>.</li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
7:05 - Louie's target with NEXT
20:37 - Ecological psychology and grid cells
22:06 - Why irreconcilable?
28:59 - Why hasn't ecological psychology evolved more?
47:13 - NExT
49:10 - Hypothesis 1
55:45 - Hypothesis 2
1:02:55 - Artificial intelligence and ecological psychology
1:16:33 - Manifolds
1:31:20 - Hypothesis 4: Body, low-D, Synergies
1:35:53 - Hypothesis 5: Mind emerges
1:36:23 - Hypothesis 6:</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Luis Favela is an Associate Professor at Indiana University Bloomington. He is part philosopher, part cognitive scientist, part many things, and on this episode we discuss his new book, The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment.





In the book, Louie presents his NeuroEcological Nexus Theory, or NExT, which, as the subtitle says, proposes a way forward to tie together our brains, our bodies, and the environment; namely it has a lot to do with the complexity sciences and manifolds, which we discuss. But the book doesn't just present his theory. Among other things, it presents a rich historical look into why ecological psychology and neuroscience haven't been exactly friendly over the years, in terms of how to explain our behaviors, the role of brains in those explanations, how to think about what minds are, and so on. And it suggests how the two fields can get over their differences and be friends moving forward. And I'll just say, it's written in a very accessible manner, gently guiding the reader through many of the core concepts and science that have shaped ecological psychology and neuroscience, and for that reason alone I highly recommend it.





Ok, so we discuss a bunch of topics in the book, how Louie thinks, and Louie gives us some great background and historical lessons along the way.




Luis' website.



Book:

The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment






0:00 - Intro
7:05 - Louie's target with NEXT
20:37 - Ecological psychology and grid cells
22:06 - Why irreconcilable?
28:59 - Why hasn't ecological psychology evolved more?
47:13 - NExT
49:10 - Hypothesis 1
55:45 - Hypothesis 2
1:02:55 - Artificial intelligence and ecological psychology
1:16:33 - Manifolds
1:31:20 - Hypothesis 4: Body, low-D, Synergies
1:35:53 - Hypothesis 5: Mind emerges
1:36:23 - Hypothesis 6:]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 190 Luis Favela: The Ecological Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Luis Favela is an Associate Professor at Indiana University Bloomington. He is part philosopher, part cognitive scientist, part many things, and on this episode we discuss his new book, <a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a>.</p>





<p>In the book, Louie presents his NeuroEcological Nexus Theory, or NExT, which, as the subtitle says, proposes a way forward to tie together our brains, our bodies, and the environment; namely it has a lot to do with the complexity sciences and manifolds, which we discuss. But the book doesn't just present his theory. Among other things, it presents a rich historical look into why ecological psychology and neuroscience haven't been exactly friendly over the years, in terms of how to explain our behaviors, the role of brains in those explanations, how to think about what minds are, and so on. And it suggests how the two fields can get over their differences and be friends moving forward. And I'll just say, it's written in a very accessible manner, gently guiding the reader through many of the core concepts and science that have shaped ecological psychology and neuroscience, and for that reason alone I highly recommend it.</p>





<p>Ok, so we discuss a bunch of topics in the book, how Louie thinks, and Louie gives us some great background and historical lessons along the way.</p>



<ul class="wp-block-list">
<li><a href="https://luishfavela.wixsite.com/luishfavela">Luis' website</a>.</li>



<li>Book:
<ul class="wp-block-list">
<li><a href="https://amzn.to/3LbSgrI">The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
7:05 - Louie's target with NEXT
20:37 - Ecological psychology and grid cells
22:06 - Why irreconcilable?
28:59 - Why hasn't ecological psychology evolved more?
47:13 - NExT
49:10 - Hypothesis 1
55:45 - Hypothesis 2
1:02:55 - Artificial intelligence and ecological psychology
1:16:33 - Manifolds
1:31:20 - Hypothesis 4: Body, low-D, Synergies
1:35:53 - Hypothesis 5: Mind emerges
1:36:23 - Hypothesis 6:</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1795965/c1e-x8qqumvnjnhx2g7n-dm6j836khjdn-anug7q.mp3" length="98281317"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Luis Favela is an Associate Professor at Indiana University Bloomington. He is part philosopher, part cognitive scientist, part many things, and on this episode we discuss his new book, The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment.





In the book, Louie presents his NeuroEcological Nexus Theory, or NExT, which, as the subtitle says, proposes a way forward to tie together our brains, our bodies, and the environment; namely it has a lot to do with the complexity sciences and manifolds, which we discuss. But the book doesn't just present his theory. Among other things, it presents a rich historical look into why ecological psychology and neuroscience haven't been exactly friendly over the years, in terms of how to explain our behaviors, the role of brains in those explanations, how to think about what minds are, and so on. And it suggests how the two fields can get over their differences and be friends moving forward. And I'll just say, it's written in a very accessible manner, gently guiding the reader through many of the core concepts and science that have shaped ecological psychology and neuroscience, and for that reason alone I highly recommend it.





Ok, so we discuss a bunch of topics in the book, how Louie thinks, and Louie gives us some great background and historical lessons along the way.




Luis' website.



Book:

The Ecological Brain: Unifying the Sciences of Brain, Body, and Environment






0:00 - Intro
7:05 - Louie's target with NEXT
20:37 - Ecological psychology and grid cells
22:06 - Why irreconcilable?
28:59 - Why hasn't ecological psychology evolved more?
47:13 - NExT
49:10 - Hypothesis 1
55:45 - Hypothesis 2
1:02:55 - Artificial intelligence and ecological psychology
1:16:33 - Manifolds
1:31:20 - Hypothesis 4: Body, low-D, Synergies
1:35:53 - Hypothesis 5: Mind emerges
1:36:23 - Hypothesis 6:]]>
                </itunes:summary>
                                                                            <itunes:duration>01:41:03</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 189 Joshua Vogelstein: Connectomes and Prospective Learning]]>
                </title>
                <pubDate>Sat, 29 Jun 2024 12:40:06 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1774346</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-189-joshua-vogelstein-connectomes-and-prospective-learning-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Jovo, as you'll learn, is theoretically oriented, and enjoys the formalism of mathematics to approach questions that begin with a sense of wonder. So after I learn more about his overall approach, the first topic we discuss is the world's currently largest map of an entire brain... the connectome of an insect, the fruit fly. We talk about his role in this collaborative effort, what the heck a connectome is, why it's useful and what to do with it, and so on.</p>



<p>The second main topic we discuss is his theoretical work on what his team has called prospective learning. Prospective learning differs in a fundamental way from the vast majority of AI these days, which they call retrospective learning. So we discuss what prospective learning is, and how it may improve AI moving forward.</p>



<p>At some point some audio/video sync issues crop up, so we switched to another recording method and fixed it... so just hang tight if you're viewing the podcast... it'll get better soon.</p>



<p>0:00 - Intro
05:25 - Jovo's approach
13:10 - Connectome of a fruit fly
26:39 - What to do with a connectome
37:04 - How important is a connectome?
51:48 - Prospective learning
1:15:20 - Efficiency
1:17:38 - AI doomerism</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Jovo, as you'll learn, is theoretically oriented, and enjoys the formalism of mathematics to approach questions that begin with a sense of wonder. So after I learn more about his overall approach, the first topic we discuss is the world's currently largest map of an entire brain... the connectome of an insect, the fruit fly. We talk about his role in this collaborative effort, what the heck a connectome is, why it's useful and what to do with it, and so on.



The second main topic we discuss is his theoretical work on what his team has called prospective learning. Prospective learning differs in a fundamental way from the vast majority of AI these days, which they call retrospective learning. So we discuss what prospective learning is, and how it may improve AI moving forward.



At some point some audio/video sync issues crop up, so we switched to another recording method and fixed it... so just hang tight if you're viewing the podcast... it'll get better soon.



0:00 - Intro
05:25 - Jovo's approach
13:10 - Connectome of a fruit fly
26:39 - What to do with a connectome
37:04 - How important is a connectome?
51:48 - Prospective learning
1:15:20 - Efficiency
1:17:38 - AI doomerism]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 189 Joshua Vogelstein: Connectomes and Prospective Learning]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Jovo, as you'll learn, is theoretically oriented, and enjoys the formalism of mathematics to approach questions that begin with a sense of wonder. So after I learn more about his overall approach, the first topic we discuss is the world's currently largest map of an entire brain... the connectome of an insect, the fruit fly. We talk about his role in this collaborative effort, what the heck a connectome is, why it's useful and what to do with it, and so on.</p>



<p>The second main topic we discuss is his theoretical work on what his team has called prospective learning. Prospective learning differs in a fundamental way from the vast majority of AI these days, which they call retrospective learning. So we discuss what prospective learning is, and how it may improve AI moving forward.</p>



<p>At some point some audio/video sync issues crop up, so we switched to another recording method and fixed it... so just hang tight if you're viewing the podcast... it'll get better soon.</p>



<p>0:00 - Intro
05:25 - Jovo's approach
13:10 - Connectome of a fruit fly
26:39 - What to do with a connectome
37:04 - How important is a connectome?
51:48 - Prospective learning
1:15:20 - Efficiency
1:17:38 - AI doomerism</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1774346/c1e-jjrraq88j0czr3mg-60kkqwdrfo96-s73xky.mp3" length="84746049"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Jovo, as you'll learn, is theoretically oriented, and enjoys the formalism of mathematics to approach questions that begin with a sense of wonder. So after I learn more about his overall approach, the first topic we discuss is the world's currently largest map of an entire brain... the connectome of an insect, the fruit fly. We talk about his role in this collaborative effort, what the heck a connectome is, why it's useful and what to do with it, and so on.



The second main topic we discuss is his theoretical work on what his team has called prospective learning. Prospective learning differs in a fundamental way from the vast majority of AI these days, which they call retrospective learning. So we discuss what prospective learning is, and how it may improve AI moving forward.



At some point some audio/video sync issues crop up, so we switched to another recording method and fixed it... so just hang tight if you're viewing the podcast... it'll get better soon.



0:00 - Intro
05:25 - Jovo's approach
13:10 - Connectome of a fruit fly
26:39 - What to do with a connectome
37:04 - How important is a connectome?
51:48 - Prospective learning
1:15:20 - Efficiency
1:17:38 - AI doomerism]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:19</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 188 Jolande Fooken: Coordinating Action and Perception]]>
                </title>
                <pubDate>Mon, 27 May 2024 15:56:17 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1750117</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-188-jolande-fooken-coordinating-action-and-perception</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Jolande Fooken is a post-postdoctoral researcher interested in how we move our eyes and move our hands together to accomplish naturalistic tasks. Hand-eye coordination is one of those things that sounds simple and we do it all the time to make meals for our children day in, and day out, and day in, and day out. But it becomes way less seemingly simple as soon as you learn how we make various kinds of eye movements, and how we make various kinds of hand movements, and use various strategies to do repeated tasks. And like everything in the brain sciences, it's something we don't have a perfect story for yet. So, Jolande and I discuss her work, and thoughts, and ideas around those and related topics.</p>



<ul>
<li><a href="https://ookenfooken.github.io/">Jolande's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/Ookenfooken">@ookenfooken</a>.</li>



<li>Related papers
<ul>
<li><a href="https://thefemalescientist.com/article/jolande-fooken/3262/i-am-a-parent-i-am-a-scientist/">I am a parent. I am a scientist</a>.</li>



<li><a href="https://ookenfooken.github.io/files/FookenEtal.JoV.2016.pdf">Eye movement accuracy determines natural interception strategies</a>.</li>



<li><a href="https://ookenfooken.github.io/files/FookenEtAl.JNeurosci.2023.pdf">Perceptual-cognitive integration for goal-directed action in naturalistic environments</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:27 - Eye movements
8:53 - Hand-eye coordination
9:30 - Hand-eye coordination and naturalistic tasks
26:45 - Levels of expertise
34:02 - Yarbus and eye movements
42:13 - Varieties of experimental paradigms, varieties of viewing the brain
52:46 - Career vision
1:04:07 - Evolving view about the brain
1:10:49 - Coordination, robots, and AI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Jolande Fooken is a post-postdoctoral researcher interested in how we move our eyes and move our hands together to accomplish naturalistic tasks. Hand-eye coordination is one of those things that sounds simple and we do it all the time to make meals for our children day in, and day out, and day in, and day out. But it becomes way less seemingly simple as soon as you learn how we make various kinds of eye movements, and how we make various kinds of hand movements, and use various strategies to do repeated tasks. And like everything in the brain sciences, it's something we don't have a perfect story for yet. So, Jolande and I discuss her work, and thoughts, and ideas around those and related topics.




Jolande's website.



Twitter: @ookenfooken.



Related papers

I am a parent. I am a scientist.



Eye movement accuracy determines natural interception strategies.



Perceptual-cognitive integration for goal-directed action in naturalistic environments.






0:00 - Intro
3:27 - Eye movements
8:53 - Hand-eye coordination
9:30 - Hand-eye coordination and naturalistic tasks
26:45 - Levels of expertise
34:02 - Yarbus and eye movements
42:13 - Varieties of experimental paradigms, varieties of viewing the brain
52:46 - Career vision
1:04:07 - Evolving view about the brain
1:10:49 - Coordination, robots, and AI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 188 Jolande Fooken: Coordinating Action and Perception]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Jolande Fooken is a post-postdoctoral researcher interested in how we move our eyes and move our hands together to accomplish naturalistic tasks. Hand-eye coordination is one of those things that sounds simple and we do it all the time to make meals for our children day in, and day out, and day in, and day out. But it becomes way less seemingly simple as soon as you learn how we make various kinds of eye movements, and how we make various kinds of hand movements, and use various strategies to do repeated tasks. And like everything in the brain sciences, it's something we don't have a perfect story for yet. So, Jolande and I discuss her work, and thoughts, and ideas around those and related topics.</p>



<ul>
<li><a href="https://ookenfooken.github.io/">Jolande's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/Ookenfooken">@ookenfooken</a>.</li>



<li>Related papers
<ul>
<li><a href="https://thefemalescientist.com/article/jolande-fooken/3262/i-am-a-parent-i-am-a-scientist/">I am a parent. I am a scientist</a>.</li>



<li><a href="https://ookenfooken.github.io/files/FookenEtal.JoV.2016.pdf">Eye movement accuracy determines natural interception strategies</a>.</li>



<li><a href="https://ookenfooken.github.io/files/FookenEtAl.JNeurosci.2023.pdf">Perceptual-cognitive integration for goal-directed action in naturalistic environments</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:27 - Eye movements
8:53 - Hand-eye coordination
9:30 - Hand-eye coordination and naturalistic tasks
26:45 - Levels of expertise
34:02 - Yarbus and eye movements
42:13 - Varieties of experimental paradigms, varieties of viewing the brain
52:46 - Career vision
1:04:07 - Evolving view about the brain
1:10:49 - Coordination, robots, and AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1750117/c1e-pjzza513odfkz89r-o87q66nxf4m2-vlghpp.mp3" length="85683562"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Jolande Fooken is a post-postdoctoral researcher interested in how we move our eyes and move our hands together to accomplish naturalistic tasks. Hand-eye coordination is one of those things that sounds simple and we do it all the time to make meals for our children day in, and day out, and day in, and day out. But it becomes way less seemingly simple as soon as you learn how we make various kinds of eye movements, and how we make various kinds of hand movements, and use various strategies to do repeated tasks. And like everything in the brain sciences, it's something we don't have a perfect story for yet. So, Jolande and I discuss her work, and thoughts, and ideas around those and related topics.




Jolande's website.



Twitter: @ookenfooken.



Related papers

I am a parent. I am a scientist.



Eye movement accuracy determines natural interception strategies.



Perceptual-cognitive integration for goal-directed action in naturalistic environments.






0:00 - Intro
3:27 - Eye movements
8:53 - Hand-eye coordination
9:30 - Hand-eye coordination and naturalistic tasks
26:45 - Levels of expertise
34:02 - Yarbus and eye movements
42:13 - Varieties of experimental paradigms, varieties of viewing the brain
52:46 - Career vision
1:04:07 - Evolving view about the brain
1:10:49 - Coordination, robots, and AI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:28:14</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 187: COSYNE 2024 Neuro-AI Panel]]>
                </title>
                <pubDate>Sat, 20 Apr 2024 16:27:27 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1725283</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-187-cosyne-2024-neuro-ai-panel</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Recently I was invited to moderate a panel at the annual Computational and Systems Neuroscience, or COSYNE, conference. This year was the 20th anniversary of COSYNE, and we were in Lisbon, Portugal. The panel goal was to discuss the relationship between neuroscience and AI. The panelists were Tony Zador, Alex Pouget, Blaise Aguera y Arcas, Kim Stachenfeld, Jonathan Pillow, and Eva Dyer. And I'll let them introduce themselves soon. Two of the panelists, Tony and Alex, co-founded COSYNE those 20 years ago, and they continue to have different views about the neuro-AI relationship. Tony has been on the podcast before and will return soon, and I'll also have Kim Stachenfeld on in a couple episodes. I think this was a fun discussion, and I hope you enjoy it. There's plenty of back and forth, a wide range of opinions, and some criticism from one of the audience questioners. This is an edited audio version, to remove long dead space and such. There's about 30 minutes of just panel, then the panel starts fielding questions from the audience. </p>



<ul>
<li><a href="https://www.cosyne.org/">COSYNE</a>.</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Recently I was invited to moderate a panel at the annual Computational and Systems Neuroscience, or COSYNE, conference. This year was the 20th anniversary of COSYNE, and we were in Lisbon, Portugal. The panel goal was to discuss the relationship between neuroscience and AI. The panelists were Tony Zador, Alex Pouget, Blaise Aguera y Arcas, Kim Stachenfeld, Jonathan Pillow, and Eva Dyer. And I'll let them introduce themselves soon. Two of the panelists, Tony and Alex, co-founded COSYNE those 20 years ago, and they continue to have different views about the neuro-AI relationship. Tony has been on the podcast before and will return soon, and I'll also have Kim Stachenfeld on in a couple episodes. I think this was a fun discussion, and I hope you enjoy it. There's plenty of back and forth, a wide range of opinions, and some criticism from one of the audience questioners. This is an edited audio version, to remove long dead space and such. There's about 30 minutes of just panel, then the panel starts fielding questions from the audience. 




COSYNE.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 187: COSYNE 2024 Neuro-AI Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Recently I was invited to moderate a panel at the annual Computational and Systems Neuroscience, or COSYNE, conference. This year was the 20th anniversary of COSYNE, and we were in Lisbon, Portugal. The panel goal was to discuss the relationship between neuroscience and AI. The panelists were Tony Zador, Alex Pouget, Blaise Aguera y Arcas, Kim Stachenfeld, Jonathan Pillow, and Eva Dyer. And I'll let them introduce themselves soon. Two of the panelists, Tony and Alex, co-founded COSYNE those 20 years ago, and they continue to have different views about the neuro-AI relationship. Tony has been on the podcast before and will return soon, and I'll also have Kim Stachenfeld on in a couple episodes. I think this was a fun discussion, and I hope you enjoy it. There's plenty of back and forth, a wide range of opinions, and some criticism from one of the audience questioners. This is an edited audio version, to remove long dead space and such. There's about 30 minutes of just panel, then the panel starts fielding questions from the audience. </p>



<ul>
<li><a href="https://www.cosyne.org/">COSYNE</a>.</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1725283/c1e-9077sno7d7t09zp0-wngpz51vbd0n-epmlze.mp3" length="61333340"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Recently I was invited to moderate a panel at the annual Computational and Systems Neuroscience, or COSYNE, conference. This year was the 20th anniversary of COSYNE, and we were in Lisbon, Portugal. The panel's goal was to discuss the relationship between neuroscience and AI. The panelists were Tony Zador, Alex Pouget, Blaise Aguera y Arcas, Kim Stachenfeld, Jonathan Pillow, and Eva Dyer. And I'll let them introduce themselves soon. Two of the panelists, Tony and Alex, co-founded COSYNE those 20 years ago, and they continue to have different views about the neuro-AI relationship. Tony has been on the podcast before and will return soon, and I'll also have Kim Stachenfeld on in a couple episodes. I think this was a fun discussion, and I hope you enjoy it. There's plenty of back and forth, a wide range of opinions, and some criticism from one of the audience questioners. This is an edited audio version, to remove long dead space and such. There's about 30 minutes of just panel, then the panel starts fielding questions from the audience. 




COSYNE.
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:03:35</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 186 Mazviita Chirimuuta: The Brain Abstracted]]>
                </title>
                <pubDate>Mon, 25 Mar 2024 22:39:36 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1702648</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-186-mazviita-chirimuuta-the-brain-abstracted-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Mazviita Chirimuuta is a philosopher at the University of Edinburgh. Today we discuss topics from her new book, <a href="https://mitpress.mit.edu/9780262548045/the-brain-abstracted/">The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience</a>. </p>



<p>She largely argues that when we try to understand something complex, like the brain, using models, and math, and analogies, for example - we should keep in mind these are all ways of simplifying and abstracting away details to give us something we actually can understand. And, when we do science, every tool we use and perspective we bring, every way we try to attack a problem, these are all both necessary to do the science and limit the interpretation we can claim from our results. She does all this and more by exploring many topics in neuroscience and philosophy throughout the book, many of which we discuss today.</p>





<ul>
<li><a href="https://www.ed.ac.uk/profile/mazviita-chirimuuta">Mazviita's University of Edinburgh page</a>.</li>



<li><a href="https://mitpress.mit.edu/9780262548045/the-brain-abstracted/">The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience</a>.</li>



<li>Previous Brain Inspired episodes:
<ul>
<li><a href="https://braininspired.co/podcast/72/">BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality</a></li>



<li><a href="https://braininspired.co/podcast/114/">BI 114 Mark Sprevak and Mazviita Chirimuuta: Computation and the Mind</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:28 - Neuroscience to philosophy
13:39 - Big themes of the book
27:44 - Simplifying by mathematics
32:19 - Simplifying by reduction
42:55 - Simplification by analogy
46:33 - Technology precedes science
55:04 - Theory, technology, and understanding
58:04 - Cross-disciplinary progress
58:45 - Complex vs. simple(r) systems
1:08:07 - Is science bound to study stability?
1:13:20 - 4E for philosophy but not neuroscience?
1:28:50 - ANNs as models
1:38:38 - Study of mind</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Mazviita Chirimuuta is a philosopher at the University of Edinburgh. Today we discuss topics from her new book, The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience. 



She largely argues that when we try to understand something complex, like the brain, using models, and math, and analogies, for example - we should keep in mind these are all ways of simplifying and abstracting away details to give us something we actually can understand. And, when we do science, every tool we use and perspective we bring, every way we try to attack a problem, these are all both necessary to do the science and limit the interpretation we can claim from our results. She does all this and more by exploring many topics in neuroscience and philosophy throughout the book, many of which we discuss today.






Mazviita's University of Edinburgh page.



The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience.



Previous Brain Inspired episodes:

BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality



BI 114 Mark Sprevak and Mazviita Chirimuuta: Computation and the Mind






0:00 - Intro
5:28 - Neuroscience to philosophy
13:39 - Big themes of the book
27:44 - Simplifying by mathematics
32:19 - Simplifying by reduction
42:55 - Simplification by analogy
46:33 - Technology precedes science
55:04 - Theory, technology, and understanding
58:04 - Cross-disciplinary progress
58:45 - Complex vs. simple(r) systems
1:08:07 - Is science bound to study stability?
1:13:20 - 4E for philosophy but not neuroscience?
1:28:50 - ANNs as models
1:38:38 - Study of mind]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 186 Mazviita Chirimuuta: The Brain Abstracted]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Mazviita Chirimuuta is a philosopher at the University of Edinburgh. Today we discuss topics from her new book, <a href="https://mitpress.mit.edu/9780262548045/the-brain-abstracted/">The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience</a>. </p>



<p>She largely argues that when we try to understand something complex, like the brain, using models, and math, and analogies, for example - we should keep in mind these are all ways of simplifying and abstracting away details to give us something we actually can understand. And, when we do science, every tool we use and perspective we bring, every way we try to attack a problem, these are all both necessary to do the science and limit the interpretation we can claim from our results. She does all this and more by exploring many topics in neuroscience and philosophy throughout the book, many of which we discuss today.</p>





<ul>
<li><a href="https://www.ed.ac.uk/profile/mazviita-chirimuuta">Mazviita's University of Edinburgh page</a>.</li>



<li><a href="https://mitpress.mit.edu/9780262548045/the-brain-abstracted/">The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience</a>.</li>



<li>Previous Brain Inspired episodes:
<ul>
<li><a href="https://braininspired.co/podcast/72/">BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality</a></li>



<li><a href="https://braininspired.co/podcast/114/">BI 114 Mark Sprevak and Mazviita Chirimuuta: Computation and the Mind</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:28 - Neuroscience to philosophy
13:39 - Big themes of the book
27:44 - Simplifying by mathematics
32:19 - Simplifying by reduction
42:55 - Simplification by analogy
46:33 - Technology precedes science
55:04 - Theory, technology, and understanding
58:04 - Cross-disciplinary progress
58:45 - Complex vs. simple(r) systems
1:08:07 - Is science bound to study stability?
1:13:20 - 4E for philosophy but not neuroscience?
1:28:50 - ANNs as models
1:38:38 - Study of mind</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1702648/c1e-m1mmsn33rpt381p4-qxnnp67pt7-hmtw65.mp3" length="100729239"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











Mazviita Chirimuuta is a philosopher at the University of Edinburgh. Today we discuss topics from her new book, The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience. 



She largely argues that when we try to understand something complex, like the brain, using models, and math, and analogies, for example - we should keep in mind these are all ways of simplifying and abstracting away details to give us something we actually can understand. And, when we do science, every tool we use and perspective we bring, every way we try to attack a problem, these are all both necessary to do the science and limit the interpretation we can claim from our results. She does all this and more by exploring many topics in neuroscience and philosophy throughout the book, many of which we discuss today.






Mazviita's University of Edinburgh page.



The Brain Abstracted: Simplification in the History and Philosophy of Neuroscience.



Previous Brain Inspired episodes:

BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality



BI 114 Mark Sprevak and Mazviita Chirimuuta: Computation and the Mind






0:00 - Intro
5:28 - Neuroscience to philosophy
13:39 - Big themes of the book
27:44 - Simplifying by mathematics
32:19 - Simplifying by reduction
42:55 - Simplification by analogy
46:33 - Technology precedes science
55:04 - Theory, technology, and understanding
58:04 - Cross-disciplinary progress
58:45 - Complex vs. simple(r) systems
1:08:07 - Is science bound to study stability?
1:13:20 - 4E for philosophy but not neuroscience?
1:28:50 - ANNs as models
1:38:38 - Study of mind]]>
                </itunes:summary>
                                                                            <itunes:duration>01:43:34</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 185 Eric Yttri: Orchestrating Behavior]]>
                </title>
                <pubDate>Wed, 06 Mar 2024 14:56:51 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1681441</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-185-eric-yttri-orchestrating-behavior-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>As some of you know, I recently got back into the research world, and in particular I work in Eric Yttris' lab at Carnegie Mellon University.</p>



<p>Eric's lab studies the relationship between various kinds of behaviors and the neural activity in a few areas known to be involved in enacting and shaping those behaviors, namely the motor cortex and basal ganglia. And to study that, he uses tools like optogenetics, neuronal recordings, and stimulations, while mice perform certain tasks, or, in my case, while they freely behave wandering around an enclosed space.</p>



<p>We talk about how Eric got here, how and why the motor cortex and basal ganglia are still mysteries despite lots of theories and experimental work, Eric's work on trying to solve those mysteries using both trained tasks and more naturalistic behavior. We talk about the valid question, "What is a behavior?", and lots more.</p>



<p><a href="https://labs.bio.cmu.edu/yttri/">Yttri Lab</a></p>



<ul>
<li>Twitter: <a href="https://twitter.com/YttriLab">@YttriLab</a></li>



<li>Related papers
<ul>
<li><a href="https://pubmed.ncbi.nlm.nih.gov/27135927/">Opponent and bidirectional control of movement velocity in the basal ganglia</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-021-25420-x?proof=tr">B-SOiD, an open-source unsupervised algorithm for identification and fast prediction of behaviors</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:36 - Eric's background
14:47 - Different animal models
17:59 - ANNs as models for animal brains
24:34 - Main question
25:43 - How circuits produce appropriate behaviors
26:10 - Cerebellum
27:49 - What do motor cortex and basal ganglia do?
49:12 - Neuroethology
1:06:09 - What is a behavior?
1:11:18 - Categorize behavior (B-SOiD)
1:22:01 - Real behavior vs. ANNs
1:33:09 - Best era in neuroscience</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











As some of you know, I recently got back into the research world, and in particular I work in Eric Yttris' lab at Carnegie Mellon University.



Eric's lab studies the relationship between various kinds of behaviors and the neural activity in a few areas known to be involved in enacting and shaping those behaviors, namely the motor cortex and basal ganglia. And to study that, he uses tools like optogenetics, neuronal recordings, and stimulations, while mice perform certain tasks, or, in my case, while they freely behave wandering around an enclosed space.



We talk about how Eric got here, how and why the motor cortex and basal ganglia are still mysteries despite lots of theories and experimental work, Eric's work on trying to solve those mysteries using both trained tasks and more naturalistic behavior. We talk about the valid question, "What is a behavior?", and lots more.



Yttri Lab




Twitter: @YttriLab



Related papers

Opponent and bidirectional control of movement velocity in the basal ganglia.



B-SOiD, an open-source unsupervised algorithm for identification and fast prediction of behaviors.






0:00 - Intro
2:36 - Eric's background
14:47 - Different animal models
17:59 - ANNs as models for animal brains
24:34 - Main question
25:43 - How circuits produce appropriate behaviors
26:10 - Cerebellum
27:49 - What do motor cortex and basal ganglia do?
49:12 - Neuroethology
1:06:09 - What is a behavior?
1:11:18 - Categorize behavior (B-SOiD)
1:22:01 - Real behavior vs. ANNs
1:33:09 - Best era in neuroscience]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 185 Eric Yttri: Orchestrating Behavior]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>As some of you know, I recently got back into the research world, and in particular I work in Eric Yttris' lab at Carnegie Mellon University.</p>



<p>Eric's lab studies the relationship between various kinds of behaviors and the neural activity in a few areas known to be involved in enacting and shaping those behaviors, namely the motor cortex and basal ganglia. And to study that, he uses tools like optogenetics, neuronal recordings, and stimulations, while mice perform certain tasks, or, in my case, while they freely behave wandering around an enclosed space.</p>



<p>We talk about how Eric got here, how and why the motor cortex and basal ganglia are still mysteries despite lots of theories and experimental work, Eric's work on trying to solve those mysteries using both trained tasks and more naturalistic behavior. We talk about the valid question, "What is a behavior?", and lots more.</p>



<p><a href="https://labs.bio.cmu.edu/yttri/">Yttri Lab</a></p>



<ul>
<li>Twitter: <a href="https://twitter.com/YttriLab">@YttriLab</a></li>



<li>Related papers
<ul>
<li><a href="https://pubmed.ncbi.nlm.nih.gov/27135927/">Opponent and bidirectional control of movement velocity in the basal ganglia</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-021-25420-x?proof=tr">B-SOiD, an open-source unsupervised algorithm for identification and fast prediction of behaviors</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:36 - Eric's background
14:47 - Different animal models
17:59 - ANNs as models for animal brains
24:34 - Main question
25:43 - How circuits produce appropriate behaviors
26:10 - Cerebellum
27:49 - What do motor cortex and basal ganglia do?
49:12 - Neuroethology
1:06:09 - What is a behavior?
1:11:18 - Categorize behavior (B-SOiD)
1:22:01 - Real behavior vs. ANNs
1:33:09 - Best era in neuroscience</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1681441/c1e-gkooh3govguzrr6q-mq3vgng7uzog-jelajk.mp3" length="101900995"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











As some of you know, I recently got back into the research world, and in particular I work in Eric Yttris' lab at Carnegie Mellon University.



Eric's lab studies the relationship between various kinds of behaviors and the neural activity in a few areas known to be involved in enacting and shaping those behaviors, namely the motor cortex and basal ganglia. And to study that, he uses tools like optogenetics, neuronal recordings, and stimulations, while mice perform certain tasks, or, in my case, while they freely behave wandering around an enclosed space.



We talk about how Eric got here, how and why the motor cortex and basal ganglia are still mysteries despite lots of theories and experimental work, Eric's work on trying to solve those mysteries using both trained tasks and more naturalistic behavior. We talk about the valid question, "What is a behavior?", and lots more.



Yttri Lab




Twitter: @YttriLab



Related papers

Opponent and bidirectional control of movement velocity in the basal ganglia.



B-SOiD, an open-source unsupervised algorithm for identification and fast prediction of behaviors.






0:00 - Intro
2:36 - Eric's background
14:47 - Different animal models
17:59 - ANNs as models for animal brains
24:34 - Main question
25:43 - How circuits produce appropriate behaviors
26:10 - Cerebellum
27:49 - What do motor cortex and basal ganglia do?
49:12 - Neuroethology
1:06:09 - What is a behavior?
1:11:18 - Categorize behavior (B-SOiD)
1:22:01 - Real behavior vs. ANNs
1:33:09 - Best era in neuroscience]]>
                </itunes:summary>
                                                                            <itunes:duration>01:44:50</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 184 Peter Stratton: Synthesize Neural Principles]]>
                </title>
                <pubDate>Tue, 20 Feb 2024 02:35:15 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1665638</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-184-peter-stratton-synthesize-neural-principles-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Peter Stratton is a research scientist at Queensland University of Technology.</p>





<p>I was pointed toward Pete by a patreon supporter, who sent me a sort of perspective piece Pete wrote that is the main focus of our conversation, although we also talk about some of his work in particular - for example, he works with spiking neural networks, like my last guest, <a href="https://braininspired.co/podcast/183/">Dan Goodman</a>.</p>



<p>What Pete argues for is what he calls a sideways-in approach. So a bottom-up approach is to build things like we find them in the brain, put them together, and voila, we'll get cognition. A top-down approach, the current approach in AI, is to train a system to perform a task, give it some algorithms to run, and fiddle with the architecture and lower level details until you pass your favorite benchmark test. Pete is focused more on the principles of computation brains employ that current AI doesn't. If you're familiar with David Marr, this is akin to his so-called "algorithmic level", but it's between that and the "implementation level", I'd say. Because Pete is focused on the synthesis of different kinds of brain operations - how they intermingle to perform computations and produce emergent properties. So he thinks more like a systems neuroscientist in that respect. Figuring that out is figuring out how to make better AI, Pete says. So we discuss a handful of those principles, all through the lens of how challenging a task it is to synthesize multiple principles into a coherent functioning whole (as opposed to a collection of parts). But, hey, evolution did it, so I'm sure we can, too, right?</p>



<ul>
<li><a href="http://neuro-ai.info/index.html">Peter's website</a>.</li>



<li>Related papers
<ul>
<li><a href="https://link.springer.com/article/10.1007/s12559-023-10181-0">Convolutionary, Evolutionary, and Revolutionary: What’s Next for Brains, Bodies, and AI?</a></li>



<li><a href="https://arxiv.org/abs/2208.01204">Making a Spiking Net Work: Robust brain-like unsupervised machine learning</a>.</li>



<li><a href="https://www.frontiersin.org/articles/10.3389/fnsys.2015.00119/full">Global segregation of cortical activity and metastable dynamics</a>.</li>



<li><a href="https://physoc.onlinelibrary.wiley.com/doi/am-pdf/10.1113/jp271444">Unlocking neural complexity with a robotic key</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:50 - AI background, neuroscience principles
8:00 - Overall view of modern AI
14:14 - Moravec's paradox and robotics
20:50 - Understanding movement to understand cognition
30:01 - How close are we to understanding brains/minds?
32:17 - Pete's goal
34:43 - Principles from neuroscience to build AI
42:39 - Levels of abstraction and implementation
49:57 - Mental disorders and robustness
55:58 - Function vs. implementation
1:04:04 - Spiking networks
1:07:57 - The roadmap
1:19:10 - AGI
1:23:48 - The terms AGI and AI
1:26:12 - Consciousness</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Peter Stratton is a research scientist at Queensland University of Technology.





I was pointed toward Pete by a patreon supporter, who sent me a sort of perspective piece Pete wrote that is the main focus of our conversation, although we also talk about some of his work in particular - for example, he works with spiking neural networks, like my last guest, Dan Goodman.



What Pete argues for is what he calls a sideways-in approach. So a bottom-up approach is to build things like we find them in the brain, put them together, and voila, we'll get cognition. A top-down approach, the current approach in AI, is to train a system to perform a task, give it some algorithms to run, and fiddle with the architecture and lower level details until you pass your favorite benchmark test. Pete is focused more on the principles of computation brains employ that current AI doesn't. If you're familiar with David Marr, this is akin to his so-called "algorithmic level", but it's between that and the "implementation level", I'd say. Because Pete is focused on the synthesis of different kinds of brain operations - how they intermingle to perform computations and produce emergent properties. So he thinks more like a systems neuroscientist in that respect. Figuring that out is figuring out how to make better AI, Pete says. So we discuss a handful of those principles, all through the lens of how challenging a task it is to synthesize multiple principles into a coherent functioning whole (as opposed to a collection of parts). But, hey, evolution did it, so I'm sure we can, too, right?




Peter's website.



Related papers

Convolutionary, Evolutionary, and Revolutionary: What’s Next for Brains, Bodies, and AI?



Making a Spiking Net Work: Robust brain-like unsupervised machine learning.



Global segregation of cortical activity and metastable dynamics.



Unlocking neural complexity with a robotic key






0:00 - Intro
3:50 - AI background, neuroscience principles
8:00 - Overall view of modern AI
14:14 - Moravec's paradox and robotics
20:50 - Understanding movement to understand cognition
30:01 - How close are we to understanding brains/minds?
32:17 - Pete's goal
34:43 - Principles from neuroscience to build AI
42:39 - Levels of abstraction and implementation
49:57 - Mental disorders and robustness
55:58 - Function vs. implementation
1:04:04 - Spiking networks
1:07:57 - The roadmap
1:19:10 - AGI
1:23:48 - The terms AGI and AI
1:26:12 - Consciousness]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 184 Peter Stratton: Synthesize Neural Principles]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Peter Stratton is a research scientist at Queensland University of Technology.</p>





<p>I was pointed toward Pete by a patreon supporter, who sent me a sort of perspective piece Pete wrote that is the main focus of our conversation, although we also talk about some of his work in particular - for example, he works with spiking neural networks, like my last guest, <a href="https://braininspired.co/podcast/183/">Dan Goodman</a>.</p>



<p>What Pete argues for is what he calls a sideways-in approach. So a bottom-up approach is to build things like we find them in the brain, put them together, and voila, we'll get cognition. A top-down approach, the current approach in AI, is to train a system to perform a task, give it some algorithms to run, and fiddle with the architecture and lower level details until you pass your favorite benchmark test. Pete is focused more on the principles of computation brains employ that current AI doesn't. If you're familiar with David Marr, this is akin to his so-called "algorithmic level", but it's between that and the "implementation level", I'd say. Because Pete is focused on the synthesis of different kinds of brain operations - how they intermingle to perform computations and produce emergent properties. So he thinks more like a systems neuroscientist in that respect. Figuring that out is figuring out how to make better AI, Pete says. So we discuss a handful of those principles, all through the lens of how challenging a task it is to synthesize multiple principles into a coherent functioning whole (as opposed to a collection of parts). But, hey, evolution did it, so I'm sure we can, too, right?</p>



<ul>
<li><a href="http://neuro-ai.info/index.html">Peter's website</a>.</li>



<li>Related papers
<ul>
<li><a href="https://link.springer.com/article/10.1007/s12559-023-10181-0">Convolutionary, Evolutionary, and Revolutionary: What’s Next for Brains, Bodies, and AI?</a></li>



<li><a href="https://arxiv.org/abs/2208.01204">Making a Spiking Net Work: Robust brain-like unsupervised machine learning</a>.</li>



<li><a href="https://www.frontiersin.org/articles/10.3389/fnsys.2015.00119/full">Global segregation of cortical activity and metastable dynamics</a>.</li>



<li><a href="https://physoc.onlinelibrary.wiley.com/doi/am-pdf/10.1113/jp271444">Unlocking neural complexity with a robotic key</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:50 - AI background, neuroscience principles
8:00 - Overall view of modern AI
14:14 - Moravec's paradox and robotics
20:50 - Understanding movement to understand cognition
30:01 - How close are we to understanding brains/minds?
32:17 - Pete's goal
34:43 - Principles from neuroscience to build AI
42:39 - Levels of abstraction and implementation
49:57 - Mental disorders and robustness
55:58 - Function vs. implementation
1:04:04 - Spiking networks
1:07:57 - The roadmap
1:19:10 - AGI
1:23:48 - The terms AGI and AI
1:26:12 - Consciousness</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1665638/c1e-q488i2ox39s6x4jg-04m87gr2cwvk-ylsfua.mp3" length="88496242"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Peter Stratton is a research scientist at Queensland University of Technology.





I was pointed toward Pete by a patreon supporter, who sent me a sort of perspective piece Pete wrote that is the main focus of our conversation, although we also talk about some of his work in particular - for example, he works with spiking neural networks, like my last guest, Dan Goodman.



What Pete argues for is what he calls a sideways-in approach. So a bottom-up approach is to build things like we find them in the brain, put them together, and voila, we'll get cognition. A top-down approach, the current approach in AI, is to train a system to perform a task, give it some algorithms to run, and fiddle with the architecture and lower level details until you pass your favorite benchmark test. Pete is focused more on the principles of computation brains employ that current AI doesn't. If you're familiar with David Marr, this is akin to his so-called "algorithmic level", but it's between that and the "implementation level", I'd say. Because Pete is focused on the synthesis of different kinds of brain operations - how they intermingle to perform computations and produce emergent properties. So he thinks more like a systems neuroscientist in that respect. Figuring that out is figuring out how to make better AI, Pete says. So we discuss a handful of those principles, all through the lens of how challenging a task it is to synthesize multiple principles into a coherent functioning whole (as opposed to a collection of parts). But, hey, evolution did it, so I'm sure we can, too, right?




Peter's website.



Related papers

Convolutionary, Evolutionary, and Revolutionary: What’s Next for Brains, Bodies, and AI?



Making a Spiking Net Work: Robust brain-like unsupervised machine learning.



Global segregation of cortical activity and metastable dynamics.



Unlocking neural complexity with a robotic key






0:00 - Intro
3:50 - AI background, neuroscience principles
8:00 - Overall view of modern AI
14:14 - Moravec's paradox and robotics
20:50 - Understanding movement to understand cognition
30:01 - How close are we to understanding brains/minds?
32:17 - Pete's goal
34:43 - Principles from neuroscience to build AI
42:39 - Levels of abstraction and implementation
49:57 - Mental disorders and robustness
55:58 - Function vs. implementation
1:04:04 - Spiking networks
1:07:57 - The roadmap
1:19:10 - AGI
1:23:48 - The terms AGI and AI
1:26:12 - Consciousness]]>
                </itunes:summary>
                                                                            <itunes:duration>01:30:47</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 183 Dan Goodman: Neural Reckoning]]>
                </title>
                <pubDate>Tue, 06 Feb 2024 23:57:15 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1656224</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-183-dan-goodman-neural-reckoning-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>You may know my guest as the co-founder of Neuromatch, the excellent online computational neuroscience academy, or as the creator of the Brian spiking neural network simulator, which is freely available. I know him as a spiking neural network practitioner extraordinaire. Dan Goodman runs the Neural Reckoning Group at Imperial College London, where they use spiking neural networks to figure out how biological and artificial brains reckon, or compute.</p>



<p>All of the current AI we use to do all the impressive things we do, essentially all of it, is built on artificial neural networks. Notice the word "neural" there. That word is meant to communicate that these artificial networks do stuff the way our brains do stuff. And indeed, if you take a few steps back, spin around 10 times, take a few shots of whiskey, and squint hard enough, there is a passing resemblance. One thing you'll probably still notice, in your drunken stupor, is that, among the thousand ways ANNs differ from brains, is that they don't use action potentials, or spikes. From the perspective of neuroscience, that can seem mighty curious. Because, for decades now, neuroscience has focused on spikes as <em>the</em> things that make our cognition tick.</p>



<p>We count them and compare them in different conditions, and generally put a lot of stock in their usefulness in brains.</p>



<p>So what does it mean that modern neural networks disregard spiking altogether?</p>



<p>Maybe spiking really isn't important to process and transmit information as well as our brains do. Or maybe spiking is one among many ways for intelligent systems to function well. Dan shares some of what he's learned and how he thinks about spiking and SNNs and a host of other topics.</p>



<ul>
<li><a href="https://neural-reckoning.org/">Neural Reckoning Group</a>.</li>



<li>Twitter: <a href="https://twitter.com/neuralreckoning">@neuralreckoning</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.nature.com/articles/s41467-021-26022-3.pdf">Neural heterogeneity promotes robust learning</a>.</li>



<li><a href="https://arxiv.org/abs/2106.02626">Dynamics of specialization in neural modules under resource constraints</a>.</li>



<li><a href="https://neural-reckoning.org/pub_multimodal.html">Multimodal units fuse-then-accumulate evidence across channels</a>.</li>



<li><a href="https://www.dropbox.com/s/942rf97l80wyya5/snufa-meeting-report.pdf?dl=1">Visualizing a joint future of neuroscience and neuromorphic engineering</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:47 - Why spiking neural networks, and a mathematical background
13:16 - Efficiency
17:36 - Machine learning for neuroscience
19:38 - Why not jump ship from SNNs?
23:35 - Hard and easy tasks
29:20 - How brains and nets learn
32:50 - Exploratory vs. theory-driven science
37:32 - Static vs. dynamic
39:06 - Heterogeneity
46:01 - Unifying principles vs. a hodgepodge
50:37 - Sparsity
58:05 - Specialization and modularity
1:00:51 - Naturalistic experiments
1:03:41 - Projects for SNN research
1:05:09 - The right level of abstraction
1:07:58 - Obstacles to progress
1:12:30 - Levels of explanation
1:14:51 - What has AI taught neuroscience?
1:22:06 - How has neuroscience helped AI?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











You may know my guest as the co-founder of Neuromatch, the excellent online computational neuroscience academy, or as the creator of the Brian spiking neural network simulator, which is freely available. I know him as a spiking neural network practitioner extraordinaire. Dan Goodman runs the Neural Reckoning Group at Imperial College London, where they use spiking neural networks to figure out how biological and artificial brains reckon, or compute.



All of the current AI we use to do all the impressive things we do, essentially all of it, is built on artificial neural networks. Notice the word "neural" there. That word is meant to communicate that these artificial networks do stuff the way our brains do stuff. And indeed, if you take a few steps back, spin around 10 times, take a few shots of whiskey, and squint hard enough, there is a passing resemblance. One thing you'll probably still notice, in your drunken stupor, is that, among the thousand ways ANNs differ from brains, is that they don't use action potentials, or spikes. From the perspective of neuroscience, that can seem mighty curious. Because, for decades now, neuroscience has focused on spikes as the things that make our cognition tick.



We count them and compare them in different conditions, and generally put a lot of stock in their usefulness in brains.



So what does it mean that modern neural networks disregard spiking altogether?



Maybe spiking really isn't important to process and transmit information as well as our brains do. Or maybe spiking is one among many ways for intelligent systems to function well. Dan shares some of what he's learned and how he thinks about spiking and SNNs and a host of other topics.




Neural Reckoning Group.



Twitter: @neuralreckoning.



Related papers

Neural heterogeneity promotes robust learning.



Dynamics of specialization in neural modules under resource constraints.



Multimodal units fuse-then-accumulate evidence across channels.



Visualizing a joint future of neuroscience and neuromorphic engineering.






0:00 - Intro
3:47 - Why spiking neural networks, and a mathematical background
13:16 - Efficiency
17:36 - Machine learning for neuroscience
19:38 - Why not jump ship from SNNs?
23:35 - Hard and easy tasks
29:20 - How brains and nets learn
32:50 - Exploratory vs. theory-driven science
37:32 - Static vs. dynamic
39:06 - Heterogeneity
46:01 - Unifying principles vs. a hodgepodge
50:37 - Sparsity
58:05 - Specialization and modularity
1:00:51 - Naturalistic experiments
1:03:41 - Projects for SNN research
1:05:09 - The right level of abstraction
1:07:58 - Obstacles to progress
1:12:30 - Levels of explanation
1:14:51 - What has AI taught neuroscience?
1:22:06 - How has neuroscience helped AI?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 183 Dan Goodman: Neural Reckoning]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>You may know my guest as the co-founder of Neuromatch, the excellent online computational neuroscience academy, or as the creator of the Brian spiking neural network simulator, which is freely available. I know him as a spiking neural network practitioner extraordinaire. Dan Goodman runs the Neural Reckoning Group at Imperial College London, where they use spiking neural networks to figure out how biological and artificial brains reckon, or compute.</p>



<p>All of the current AI we use to do all the impressive things we do, essentially all of it, is built on artificial neural networks. Notice the word "neural" there. That word is meant to communicate that these artificial networks do stuff the way our brains do stuff. And indeed, if you take a few steps back, spin around 10 times, take a few shots of whiskey, and squint hard enough, there is a passing resemblance. One thing you'll probably still notice, in your drunken stupor, is that, among the thousand ways ANNs differ from brains, is that they don't use action potentials, or spikes. From the perspective of neuroscience, that can seem mighty curious. Because, for decades now, neuroscience has focused on spikes as <em>the</em> things that make our cognition tick.</p>



<p>We count them and compare them in different conditions, and generally put a lot of stock in their usefulness in brains.</p>



<p>So what does it mean that modern neural networks disregard spiking altogether?</p>



<p>Maybe spiking really isn't important to process and transmit information as well as our brains do. Or maybe spiking is one among many ways for intelligent systems to function well. Dan shares some of what he's learned and how he thinks about spiking and SNNs and a host of other topics.</p>



<ul>
<li><a href="https://neural-reckoning.org/">Neural Reckoning Group</a>.</li>



<li>Twitter: <a href="https://twitter.com/neuralreckoning">@neuralreckoning</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.nature.com/articles/s41467-021-26022-3.pdf">Neural heterogeneity promotes robust learning</a>.</li>



<li><a href="https://arxiv.org/abs/2106.02626">Dynamics of specialization in neural modules under resource constraints</a>.</li>



<li><a href="https://neural-reckoning.org/pub_multimodal.html">Multimodal units fuse-then-accumulate evidence across channels</a>.</li>



<li><a href="https://www.dropbox.com/s/942rf97l80wyya5/snufa-meeting-report.pdf?dl=1">Visualizing a joint future of neuroscience and neuromorphic engineering</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:47 - Why spiking neural networks, and a mathematical background
13:16 - Efficiency
17:36 - Machine learning for neuroscience
19:38 - Why not jump ship from SNNs?
23:35 - Hard and easy tasks
29:20 - How brains and nets learn
32:50 - Exploratory vs. theory-driven science
37:32 - Static vs. dynamic
39:06 - Heterogeneity
46:01 - Unifying principles vs. a hodgepodge
50:37 - Sparsity
58:05 - Specialization and modularity
1:00:51 - Naturalistic experiments
1:03:41 - Projects for SNN research
1:05:09 - The right level of abstraction
1:07:58 - Obstacles to progress
1:12:30 - Levels of explanation
1:14:51 - What has AI taught neuroscience?
1:22:06 - How has neuroscience helped AI?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1656224/c1e-vzkkt8w1kpc98qnk-o8rkv767ug8g-okkoc6.mp3" length="86975539"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











You may know my guest as the co-founder of Neuromatch, the excellent online computational neuroscience academy, or as the creator of the Brian spiking neural network simulator, which is freely available. I know him as a spiking neural network practitioner extraordinaire. Dan Goodman runs the Neural Reckoning Group at Imperial College London, where they use spiking neural networks to figure out how biological and artificial brains reckon, or compute.



All of the current AI we use to do all the impressive things we do, essentially all of it, is built on artificial neural networks. Notice the word "neural" there. That word is meant to communicate that these artificial networks do stuff the way our brains do stuff. And indeed, if you take a few steps back, spin around 10 times, take a few shots of whiskey, and squint hard enough, there is a passing resemblance. One thing you'll probably still notice, in your drunken stupor, is that, among the thousand ways ANNs differ from brains, is that they don't use action potentials, or spikes. From the perspective of neuroscience, that can seem mighty curious. Because, for decades now, neuroscience has focused on spikes as the things that make our cognition tick.



We count them and compare them in different conditions, and generally put a lot of stock in their usefulness in brains.



So what does it mean that modern neural networks disregard spiking altogether?



Maybe spiking really isn't important to process and transmit information as well as our brains do. Or maybe spiking is one among many ways for intelligent systems to function well. Dan shares some of what he's learned and how he thinks about spiking and SNNs and a host of other topics.




Neural Reckoning Group.



Twitter: @neuralreckoning.



Related papers

Neural heterogeneity promotes robust learning.



Dynamics of specialization in neural modules under resource constraints.



Multimodal units fuse-then-accumulate evidence across channels.



Visualizing a joint future of neuroscience and neuromorphic engineering.






0:00 - Intro
3:47 - Why spiking neural networks, and a mathematical background
13:16 - Efficiency
17:36 - Machine learning for neuroscience
19:38 - Why not jump ship from SNNs?
23:35 - Hard and easy tasks
29:20 - How brains and nets learn
32:50 - Exploratory vs. theory-driven science
37:32 - Static vs. dynamic
39:06 - Heterogeneity
46:01 - Unifying principles vs. a hodgepodge
50:37 - Sparsity
58:05 - Specialization and modularity
1:00:51 - Naturalistic experiments
1:03:41 - Projects for SNN research
1:05:09 - The right level of abstraction
1:07:58 - Obstacles to progress
1:12:30 - Levels of explanation
1:14:51 - What has AI taught neuroscience?
1:22:06 - How has neuroscience helped AI?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:28:54</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 182: John Krakauer Returns… Again]]>
                </title>
                <pubDate>Fri, 19 Jan 2024 15:48:28 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1637913</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-182-john-krakauer-returns-again-1</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>John Krakauer has been on the podcast multiple times (see links below). Today we discuss some topics framed around what he's been working on and thinking about lately. Things like</p>



<ul>
<li>Whether brains actually reorganize after damage</li>



<li>The role of brain plasticity in general</li>



<li>The path toward and the path <em>not</em> toward understanding higher cognition</li>



<li>How to fix motor problems after strokes</li>



<li>AGI</li>



<li>Functionalism, consciousness, and much more.</li>
</ul>



<p>Relevant links:</p>



<ul>
<li><a href="http://blam-lab.org">John's Lab.</a></li>



<li>Twitter: <a href="https://twitter.com/blamlab">@blamlab</a></li>



<li>Related papers
<ul>
<li><a href="https://www.thetransmitter.org/representation/what-are-we-talking-about-clarifying-the-fuzzy-concept-of-representation-in-neuroscience-and-beyond/">What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond</a>.</li>



<li><a href="https://elifesciences.org/articles/84716">Against cortical reorganisation</a>.</li>
</ul>
</li>



<li>Other episodes with John:
<ul>
<li><a href="https://braininspired.co/podcast/25/">BI 025 John Krakauer: Understanding Cognition</a></li>



<li><a href="https://braininspired.co/podcast/77/">BI 077 David and John Krakauer: Part 1</a></li>



<li><a href="https://braininspired.co/podcast/78/">BI 078 David and John Krakauer: Part 2</a></li>



<li><a href="https://braininspired.co/podcast/113/">BI 113 David Barack and John Krakauer: Two Views On Cognition</a></li>
</ul>
</li>
</ul>



<p>Time stamps
0:00 - Intro
2:07 - It's a podcast episode!
6:47 - Stroke and Sherrington neuroscience
19:26 - Thinking vs. moving, representations
34:15 - What's special about humans?
56:35 - Does cortical reorganization happen?
1:14:08 - Current era in neuroscience</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











John Krakauer has been on the podcast multiple times (see links below). Today we discuss some topics framed around what he's been working on and thinking about lately. Things like




Whether brains actually reorganize after damage



The role of brain plasticity in general



The path toward and the path not toward understanding higher cognition



How to fix motor problems after strokes



AGI



Functionalism, consciousness, and much more.




Relevant links:




John's Lab.



Twitter: @blamlab



Related papers

What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond.



Against cortical reorganisation.





Other episodes with John:

BI 025 John Krakauer: Understanding Cognition



BI 077 David and John Krakauer: Part 1



BI 078 David and John Krakauer: Part 2



BI 113 David Barack and John Krakauer: Two Views On Cognition






Time stamps
0:00 - Intro
2:07 - It's a podcast episode!
6:47 - Stroke and Sherrington neuroscience
19:26 - Thinking vs. moving, representations
34:15 - What's special about humans?
56:35 - Does cortical reorganization happen?
1:14:08 - Current era in neuroscience]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 182: John Krakauer Returns… Again]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>John Krakauer has been on the podcast multiple times (see links below). Today we discuss some topics framed around what he's been working on and thinking about lately. Things like</p>



<ul>
<li>Whether brains actually reorganize after damage</li>



<li>The role of brain plasticity in general</li>



<li>The path toward and the path <em>not</em> toward understanding higher cognition</li>



<li>How to fix motor problems after strokes</li>



<li>AGI</li>



<li>Functionalism, consciousness, and much more.</li>
</ul>



<p>Relevant links:</p>



<ul>
<li><a href="http://blam-lab.org">John's Lab.</a></li>



<li>Twitter: <a href="https://twitter.com/blamlab">@blamlab</a></li>



<li>Related papers
<ul>
<li><a href="https://www.thetransmitter.org/representation/what-are-we-talking-about-clarifying-the-fuzzy-concept-of-representation-in-neuroscience-and-beyond/">What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond</a>.</li>



<li><a href="https://elifesciences.org/articles/84716">Against cortical reorganisation</a>.</li>
</ul>
</li>



<li>Other episodes with John:
<ul>
<li><a href="https://braininspired.co/podcast/25/">BI 025 John Krakauer: Understanding Cognition</a></li>



<li><a href="https://braininspired.co/podcast/77/">BI 077 David and John Krakauer: Part 1</a></li>



<li><a href="https://braininspired.co/podcast/78/">BI 078 David and John Krakauer: Part 2</a></li>



<li><a href="https://braininspired.co/podcast/113/">BI 113 David Barack and John Krakauer: Two Views On Cognition</a></li>
</ul>
</li>
</ul>



<p>Time stamps
0:00 - Intro
2:07 - It's a podcast episode!
6:47 - Stroke and Sherrington neuroscience
19:26 - Thinking vs. moving, representations
34:15 - What's special about humans?
56:35 - Does cortical reorganization happen?
1:14:08 - Current era in neuroscience</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1637913/c1e-rdvvfzgkr9i7omn1-92k3029xfq16-lypl8b.mp3" length="82987746"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











John Krakauer has been on the podcast multiple times (see links below). Today we discuss some topics framed around what he's been working on and thinking about lately. Things like




Whether brains actually reorganize after damage



The role of brain plasticity in general



The path toward and the path not toward understanding higher cognition



How to fix motor problems after strokes



AGI



Functionalism, consciousness, and much more.




Relevant links:




John's Lab.



Twitter: @blamlab



Related papers

What are we talking about? Clarifying the fuzzy concept of representation in neuroscience and beyond.



Against cortical reorganisation.





Other episodes with John:

BI 025 John Krakauer: Understanding Cognition



BI 077 David and John Krakauer: Part 1



BI 078 David and John Krakauer: Part 2



BI 113 David Barack and John Krakauer: Two Views On Cognition






Time stamps
0:00 - Intro
2:07 - It's a podcast episode!
6:47 - Stroke and Sherrington neuroscience
19:26 - Thinking vs. moving, representations
34:15 - What's special about humans?
56:35 - Does cortical reorganization happen?
1:14:08 - Current era in neuroscience]]>
                </itunes:summary>
                                                                            <itunes:duration>01:25:42</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 181 Max Bennett: A Brief History of Intelligence]]>
                </title>
                <pubDate>Mon, 25 Dec 2023 21:32:20 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1622087</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-181-max-bennett-a-brief-history-of-intelligence</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>









<p>By day, Max Bennett is an entrepreneur. He has cofounded and CEO'd multiple AI and technology companies. By many other countless hours, he has studied brain related sciences. Those long hours of research have paid off in the form of this book, <a href="https://amzn.to/3RSy0iQ">A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains.</a></p>



<p>Three lines of research formed the basis for how Max synthesized knowledge into the ideas in his current book: findings from comparative psychology (comparing brains and minds of different species), evolutionary neuroscience (how brains have evolved), and artificial intelligence, especially the algorithms developed to carry out functions. We go through I think all five of the breakthroughs in some capacity. A recurring theme is that each breakthrough may explain multiple new abilities. For example, the evolution of the neocortex may have endowed early mammals with the ability to simulate or imagine what isn't immediately present, and this ability might further explain mammals' capacity to engage in vicarious trial and error (imagining possible actions before trying them out), the capacity to engage in counterfactual learning (what would have happened if things went differently than they did), and the capacity for episodic memory and imagination.</p>



<p>The book is filled with unifying accounts like that, and it makes for a great read. Strap in, because Max gives a sort of masterclass about many of the ideas in his book.</p>



<ul>
<li>Twitter:
<ul>
<li><a href="https://twitter.com/maxsbennett">@maxsbennett</a></li>
</ul>
</li>



<li>Book:
<ul>
<li><a href="https://amzn.to/3RSy0iQ">A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:26 - Why evolution is important
7:22 - Maclean's triune brain
14:59 - Breakthrough 1: Steering
29:06 - Fish intelligence
40:38 - Breakthrough 3: Mentalizing
52:44 - How could we improve the human brain?
1:00:44 - What is intelligence?
1:13:50 - Breakthrough 5: Speaking</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience













By day, Max Bennett is an entrepreneur. He has cofounded and CEO'd multiple AI and technology companies. By many other countless hours, he has studied brain related sciences. Those long hours of research have paid off in the form of this book, A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains.



Three lines of research formed the basis for how Max synthesized knowledge into the ideas in his current book: findings from comparative psychology (comparing brains and minds of different species), evolutionary neuroscience (how brains have evolved), and artificial intelligence, especially the algorithms developed to carry out functions. We go through I think all five of the breakthroughs in some capacity. A recurring theme is that each breakthrough may explain multiple new abilities. For example, the evolution of the neocortex may have endowed early mammals with the ability to simulate or imagine what isn't immediately present, and this ability might further explain mammals' capacity to engage in vicarious trial and error (imagining possible actions before trying them out), the capacity to engage in counterfactual learning (what would have happened if things went differently than they did), and the capacity for episodic memory and imagination.



The book is filled with unifying accounts like that, and it makes for a great read. Strap in, because Max gives a sort of masterclass about many of the ideas in his book.




Twitter:

@maxsbennett





Book:

A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains.






0:00 - Intro
5:26 - Why evolution is important
7:22 - Maclean's triune brain
14:59 - Breakthrough 1: Steering
29:06 - Fish intelligence
40:38 - Breakthrough 3: Mentalizing
52:44 - How could we improve the human brain?
1:00:44 - What is intelligence?
1:13:50 - Breakthrough 5: Speaking]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 181 Max Bennett: A Brief History of Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>









<p>By day, Max Bennett is an entrepreneur. He has cofounded and CEO'd multiple AI and technology companies. By many other countless hours, he has studied brain related sciences. Those long hours of research have paid off in the form of this book, <a href="https://amzn.to/3RSy0iQ">A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains.</a></p>



<p>Three lines of research formed the basis for how Max synthesized knowledge into the ideas in his current book: findings from comparative psychology (comparing brains and minds of different species), evolutionary neuroscience (how brains have evolved), and artificial intelligence, especially the algorithms developed to carry out functions. We go through I think all five of the breakthroughs in some capacity. A recurring theme is that each breakthrough may explain multiple new abilities. For example, the evolution of the neocortex may have endowed early mammals with the ability to simulate or imagine what isn't immediately present, and this ability might further explain mammals' capacity to engage in vicarious trial and error (imagining possible actions before trying them out), the capacity to engage in counterfactual learning (what would have happened if things went differently than they did), and the capacity for episodic memory and imagination.</p>



<p>The book is filled with unifying accounts like that, and it makes for a great read. Strap in, because Max gives a sort of masterclass about many of the ideas in his book.</p>



<ul>
<li>Twitter:
<ul>
<li><a href="https://twitter.com/maxsbennett">@maxsbennett</a></li>
</ul>
</li>



<li>Book:
<ul>
<li><a href="https://amzn.to/3RSy0iQ">A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:26 - Why evolution is important
7:22 - Maclean's triune brain
14:59 - Breakthrough 1: Steering
29:06 - Fish intelligence
40:38 - Breakthrough 3: Mentalizing
52:44 - How could we improve the human brain?
1:00:44 - What is intelligence?
1:13:50 - Breakthrough 5: Speaking</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1622087/c1e-gkoohv2nxqij7qzd-wnkvqqm2fz2-7r0krd.mp3" length="84875924"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience













By day, Max Bennett is an entrepreneur. He has cofounded and CEO'd multiple AI and technology companies. By many other countless hours, he has studied brain related sciences. Those long hours of research have paid off in the form of this book, A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains.



Three lines of research formed the basis for how Max synthesized knowledge into the ideas in his current book: findings from comparative psychology (comparing brains and minds of different species), evolutionary neuroscience (how brains have evolved), and artificial intelligence, especially the algorithms developed to carry out functions. We go through I think all five of the breakthroughs in some capacity. A recurring theme is that each breakthrough may explain multiple new abilities. For example, the evolution of the neocortex may have endowed early mammals with the ability to simulate or imagine what isn't immediately present, and this ability might further explain mammals' capacity to engage in vicarious trial and error (imagining possible actions before trying them out), the capacity to engage in counterfactual learning (what would have happened if things went differently than they did), and the capacity for episodic memory and imagination.



The book is filled with unifying accounts like that, and it makes for a great read. Strap in, because Max gives a sort of masterclass about many of the ideas in his book.




Twitter:

@maxsbennett





Book:

A Brief History of Intelligence: Evolution, AI, and the Five Breakthroughs That Made Our Brains.






0:00 - Intro
5:26 - Why evolution is important
7:22 - Maclean's triune brain
14:59 - Breakthrough 1: Steering
29:06 - Fish intelligence
40:38 - Breakthrough 3: Mentalizing
52:44 - How could we improve the human brain?
1:00:44 - What is intelligence?
1:13:50 - Breakthrough 5: Speaking]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:30</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 180 Panel Discussion: Long-term Memory Encoding and Connectome Decoding]]>
                </title>
                <pubDate>Mon, 11 Dec 2023 14:39:11 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1613443</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-180-panel-discussion-long-term-memory-encoding-and-connectome-decoding</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Welcome to another special panel discussion episode.</p>



<p>I was recently invited to moderate a discussion amongst 6 people at the annual Aspirational Neuroscience meetup. Aspirational Neuroscience is a nonprofit community run by Kenneth Hayworth. Ken has been on the podcast before <a href="https://braininspired.co/podcast/103/">on episode 103</a>. Ken helps me introduce the meetup and panel discussion for a few minutes. The goal in general was to discuss how current and developing neuroscience technologies might be used to decode a nontrivial memory from a static connectome - what the obstacles are, how to surmount those obstacles, and so on.</p>



<p>There isn't video of the event, just audio, and because we were all sharing microphones and they were being passed around, you'll hear some microphone type noise along the way - but I did my best to optimize the audio quality, and it turned out mostly quite listenable I believe.</p>



<ul>
<li><a href="https://aspirationalneuroscience.org/">Aspirational Neuroscience</a></li>



<li>Panelists:
<ul>
<li><a href="https://alleninstitute.org/person/anton-arkhipov/">Anton Arkhipov</a>, Allen Institute for Brain Science.
<ul>
<li><a href="https://twitter.com/AntonSArkhipov">@AntonSArkhipov</a></li>
</ul>
</li>



<li><a href="http://koerding.com/">Konrad Kording</a>, University of Pennsylvania.
<ul>
<li><a href="https://twitter.com/KordingLab">@KordingLab</a></li>
</ul>
</li>



<li><a href="https://www.tcd.ie/research/profiles/?profile=tryan6">Tomás Ryan</a>, Trinity College Dublin.
<ul>
<li><a href="https://twitter.com/TJRyan_77">@TJRyan_77</a></li>
</ul>
</li>



<li><a href="https://www.janelia.org/people/srinivas-turaga">Srinivas Turaga</a>, Janelia Research Campus.</li>



<li><a href="https://viterbi.usc.edu/directory/faculty/Song/Dong">Dong Song</a>, University of Southern California.
<ul>
<li><a href="https://twitter.com/dongsong">@dongsong</a></li>
</ul>
</li>



<li><a href="https://pni.princeton.edu/people/zhihao-zheng">Zhihao Zheng</a>, Princeton University.
<ul>
<li><a href="https://twitter.com/zhihaozheng">@zhihaozheng</a></li>
</ul>
</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
1:45 - Ken Hayworth
14:09 - Panel Discussion</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Welcome to another special panel discussion episode.



I was recently invited to moderate a discussion amongst 6 people at the annual Aspirational Neuroscience meetup. Aspirational Neuroscience is a nonprofit community run by Kenneth Hayworth. Ken has been on the podcast before on episode 103. Ken helps me introduce the meetup and panel discussion for a few minutes. The goal in general was to discuss how current and developing neuroscience technologies might be used to decode a nontrivial memory from a static connectome - what the obstacles are, how to surmount those obstacles, and so on.



There isn't video of the event, just audio, and because we were all sharing microphones and they were being passed around, you'll hear some microphone type noise along the way - but I did my best to optimize the audio quality, and it turned out mostly quite listenable I believe.




Aspirational Neuroscience



Panelists:

Anton Arkhipov, Allen Institute for Brain Science.

@AntonSArkhipov





Konrad Kording, University of Pennsylvania.

@KordingLab





Tomás Ryan, Trinity College Dublin.

@TJRyan_77





Srinivas Turaga, Janelia Research Campus.



Dong Song, University of Southern California.

@dongsong





Zhihao Zheng, Princeton University.

@zhihaozheng








0:00 - Intro
1:45 - Ken Hayworth
14:09 - Panel Discussion]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 180 Panel Discussion: Long-term Memory Encoding and Connectome Decoding]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Welcome to another special panel discussion episode.</p>



<p>I was recently invited to moderate a discussion amongst 6 people at the annual Aspirational Neuroscience meetup. Aspirational Neuroscience is a nonprofit community run by Kenneth Hayworth. Ken has been on the podcast before <a href="https://braininspired.co/podcast/103/">on episode 103</a>. Ken helps me introduce the meetup and panel discussion for a few minutes. The goal in general was to discuss how current and developing neuroscience technologies might be used to decode a nontrivial memory from a static connectome - what the obstacles are, how to surmount those obstacles, and so on.</p>



<p>There isn't video of the event, just audio, and because we were all sharing microphones and they were being passed around, you'll hear some microphone type noise along the way - but I did my best to optimize the audio quality, and it turned out mostly quite listenable I believe.</p>



<ul>
<li><a href="https://aspirationalneuroscience.org/">Aspirational Neuroscience</a></li>



<li>Panelists:
<ul>
<li><a href="https://alleninstitute.org/person/anton-arkhipov/">Anton Arkhipov</a>, Allen Institute for Brain Science.
<ul>
<li><a href="https://twitter.com/AntonSArkhipov">@AntonSArkhipov</a></li>
</ul>
</li>



<li><a href="http://koerding.com/">Konrad Kording</a>, University of Pennsylvania.
<ul>
<li><a href="https://twitter.com/KordingLab">@KordingLab</a></li>
</ul>
</li>



<li><a href="https://www.tcd.ie/research/profiles/?profile=tryan6">Tomás Ryan</a>, Trinity College Dublin.
<ul>
<li><a href="https://twitter.com/TJRyan_77">@TJRyan_77</a></li>
</ul>
</li>



<li><a href="https://www.janelia.org/people/srinivas-turaga">Srinivas Turaga</a>, Janelia Research Campus.</li>



<li><a href="https://viterbi.usc.edu/directory/faculty/Song/Dong">Dong Song</a>, University of Southern California.
<ul>
<li><a href="https://twitter.com/dongsong">@dongsong</a></li>
</ul>
</li>



<li><a href="https://pni.princeton.edu/people/zhihao-zheng">Zhihao Zheng</a>, Princeton University.
<ul>
<li><a href="https://twitter.com/zhihaozheng">@zhihaozheng</a></li>
</ul>
</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
1:45 - Ken Hayworth
14:09 - Panel Discussion</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ef78b914-ff8a-4f17-bbed-afa87b28c479-180-Panel-Meetup.mp3" length="86298969"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Welcome to another special panel discussion episode.



I was recently invited to moderate a discussion amongst 6 people at the annual Aspirational Neuroscience meetup. Aspirational Neuroscience is a nonprofit community run by Kenneth Hayworth. Ken has been on the podcast before on episode 103. Ken helps me introduce the meetup and panel discussion for a few minutes. The goal in general was to discuss how current and developing neuroscience technologies might be used to decode a nontrivial memory from a static connectome - what the obstacles are, how to surmount those obstacles, and so on.



There isn't video of the event, just audio, and because we were all sharing microphones and they were being passed around, you'll hear some microphone type noise along the way - but I did my best to optimize the audio quality, and it turned out mostly quite listenable I believe.




Aspirational Neuroscience



Panelists:

Anton Arkhipov, Allen Institute for Brain Science.

@AntonSArkhipov





Konrad Kording, University of Pennsylvania.

@KordingLab





Tomás Ryan, Trinity College Dublin.

@TJRyan_77





Srinivas Turaga, Janelia Research Campus.



Dong Song, University of Southern California.

@dongsong





Zhihao Zheng, Princeton University.

@zhihaozheng








0:00 - Intro
1:45 - Ken Hayworth
14:09 - Panel Discussion]]>
                </itunes:summary>
                                                                            <itunes:duration>01:29:27</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 179 Laura Gradowski: Include the Fringe with Pluralism]]>
                </title>
                <pubDate>Mon, 27 Nov 2023 02:14:37 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1603288</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-179-laura-gradowski-include-the-fringe-with-pluralism</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>





<p>Laura Gradowski is a philosopher of science at the University of Pittsburgh. Pluralism is roughly the idea that there is no unified account of any scientific field, that we should be tolerant of and welcome a variety of theoretical and conceptual frameworks, and methods, and goals, when doing science. Pluralism is kind of a buzz word right now in my little neuroscience world, but it's an old and well-trodden notion... many philosophers have been calling for pluralism for many years. But how pluralistic should we be in our studies and explanations in science? Laura suggests we should be very, very pluralistic, and to make her case, she cites examples in the history of science of theories and theorists that were once considered "fringe" but went on to become mainstream accepted theoretical frameworks. I thought it would be fun to have her on to share her ideas about fringe theories, mainstream theories, pluralism, etc. </p>



<p>We discuss a wide range of topics, but also discuss some specific to the brain and mind sciences. Laura goes through an example of something and someone going from fringe to mainstream - the Garcia effect, named after John Garcia, whose findings went against the grain of behaviorism, the dominant dogma of the day in psychology. But this overturning only happened after Garcia had to endure a long scientific hell of his results being ignored and shunned. So, there are multiple examples like that, and we discuss a handful. This has led Laura to the conclusion we should accept almost all theoretical frameworks. We discuss her ideas about how to implement this, where to draw the line, and much more.</p>



<p><a href="https://www.centerphilsci.pitt.edu/fellows/gradowski-laura/">Laura's page</a> at the Center for the Philosophy of Science at the University of Pittsburgh.</p>



<p><a href="https://www.proquest.com/docview/2726491918?pq-origsite=gscholar&amp;fromopenview=true">Facing the Fringe</a>.</p>



<p>Garcia's reflections on his troubles: <a href="https://www.appstate.edu/~steelekm/classes/psy5150/Documents/Garcia1981-tilting-at.pdf">Tilting at the Paper Mills of Academe</a></p>



<p>0:00 - Intro
3:57 - What is fringe?
10:14 - What makes a theory fringe?
14:31 - Fringe to mainstream
17:23 - Garcia effect
28:17 - Fringe to mainstream: other examples
32:38 - Fringe and consciousness
33:19 - Words meanings change over time
40:24 - Pseudoscience
43:25 - How fringe becomes mainstream
47:19 - More fringe characteristics
50:06 - Pluralism as a solution
54:02 - Progress
1:01:39 - Encyclopedia of theories
1:09:20 - When to reject a theory
1:20:07 - How fringe becomes fringe
1:22:50 - Marginalization
1:27:53 - Recipe for fringe theorist</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









Laura Gradowski is a philosopher of science at the University of Pittsburgh. Pluralism is roughly the idea that there is no unified account of any scientific field, that we should be tolerant of and welcome a variety of theoretical and conceptual frameworks, and methods, and goals, when doing science. Pluralism is kind of a buzz word right now in my little neuroscience world, but it's an old and well-trodden notion... many philosophers have been calling for pluralism for many years. But how pluralistic should we be in our studies and explanations in science? Laura suggests we should be very, very pluralistic, and to make her case, she cites examples in the history of science of theories and theorists that were once considered "fringe" but went on to become mainstream accepted theoretical frameworks. I thought it would be fun to have her on to share her ideas about fringe theories, mainstream theories, pluralism, etc. 



We discuss a wide range of topics, but also discuss some specific to the brain and mind sciences. Laura goes through an example of something and someone going from fringe to mainstream - the Garcia effect, named after John Garcia, whose findings went against the grain of behaviorism, the dominant dogma of the day in psychology. But this overturning only happened after Garcia had to endure a long scientific hell of his results being ignored and shunned. So, there are multiple examples like that, and we discuss a handful. This has led Laura to the conclusion we should accept almost all theoretical frameworks. We discuss her ideas about how to implement this, where to draw the line, and much more.



Laura's page at the Center for the Philosophy of Science at the University of Pittsburgh. 



Facing the Fringe.



Garcia's reflections on his troubles: Tilting at the Paper Mills of Academe



0:00 - Intro
3:57 - What is fringe?
10:14 - What makes a theory fringe?
14:31 - Fringe to mainstream
17:23 - Garcia effect
28:17 - Fringe to mainstream: other examples
32:38 - Fringe and consciousness
33:19 - Words meanings change over time
40:24 - Pseudoscience
43:25 - How fringe becomes mainstream
47:19 - More fringe characteristics
50:06 - Pluralism as a solution
54:02 - Progress
1:01:39 - Encyclopedia of theories
1:09:20 - When to reject a theory
1:20:07 - How fringe becomes fringe
1:22:50 - Marginalization
1:27:53 - Recipe for fringe theorist]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 179 Laura Gradowski: Include the Fringe with Pluralism]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>





<p>Laura Gradowski is a philosopher of science at the University of Pittsburgh. Pluralism is roughly the idea that there is no unified account of any scientific field, that we should be tolerant of and welcome a variety of theoretical and conceptual frameworks, and methods, and goals, when doing science. Pluralism is kind of a buzz word right now in my little neuroscience world, but it's an old and well-trodden notion... many philosophers have been calling for pluralism for many years. But how pluralistic should we be in our studies and explanations in science? Laura suggests we should be very, very pluralistic, and to make her case, she cites examples in the history of science of theories and theorists that were once considered "fringe" but went on to become mainstream accepted theoretical frameworks. I thought it would be fun to have her on to share her ideas about fringe theories, mainstream theories, pluralism, etc. </p>



<p>We discuss a wide range of topics, but also discuss some specific to the brain and mind sciences. Laura goes through an example of something and someone going from fringe to mainstream - the Garcia effect, named after John Garcia, whose findings went against the grain of behaviorism, the dominant dogma of the day in psychology. But this overturning only happened after Garcia had to endure a long scientific hell of his results being ignored and shunned. So, there are multiple examples like that, and we discuss a handful. This has led Laura to the conclusion we should accept almost all theoretical frameworks. We discuss her ideas about how to implement this, where to draw the line, and much more.</p>



<p><a href="https://www.centerphilsci.pitt.edu/fellows/gradowski-laura/">Laura's page</a> at the Center for the Philosophy of Science at the University of Pittsburgh.</p>



<p><a href="https://www.proquest.com/docview/2726491918?pq-origsite=gscholar&amp;fromopenview=true">Facing the Fringe</a>.</p>



<p>Garcia's reflections on his troubles: <a href="https://www.appstate.edu/~steelekm/classes/psy5150/Documents/Garcia1981-tilting-at.pdf">Tilting at the Paper Mills of Academe</a></p>



<p>0:00 - Intro
3:57 - What is fringe?
10:14 - What makes a theory fringe?
14:31 - Fringe to mainstream
17:23 - Garcia effect
28:17 - Fringe to mainstream: other examples
32:38 - Fringe and consciousness
33:19 - Words meanings change over time
40:24 - Pseudoscience
43:25 - How fringe becomes mainstream
47:19 - More fringe characteristics
50:06 - Pluralism as a solution
54:02 - Progress
1:01:39 - Encyclopedia of theories
1:09:20 - When to reject a theory
1:20:07 - How fringe becomes fringe
1:22:50 - Marginalization
1:27:53 - Recipe for fringe theorist</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/e8869af7-150a-49cd-8b4d-a8e430198620-179-Laura-Gradowski-public.mp3" length="96539150"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









Laura Gradowski is a philosopher of science at the University of Pittsburgh. Pluralism is roughly the idea that there is no unified account of any scientific field, that we should be tolerant of and welcome a variety of theoretical and conceptual frameworks, and methods, and goals, when doing science. Pluralism is kind of a buzz word right now in my little neuroscience world, but it's an old and well-trodden notion... many philosophers have been calling for pluralism for many years. But how pluralistic should we be in our studies and explanations in science? Laura suggests we should be very, very pluralistic, and to make her case, she cites examples in the history of science of theories and theorists that were once considered "fringe" but went on to become mainstream accepted theoretical frameworks. I thought it would be fun to have her on to share her ideas about fringe theories, mainstream theories, pluralism, etc. 



We discuss a wide range of topics, but also discuss some specific to the brain and mind sciences. Laura goes through an example of something and someone going from fringe to mainstream - the Garcia effect, named after John Garcia, whose findings went agains the grain of behaviorism, the dominant dogma of the day in psychology. But this overturning only happened after Garcia had to endure a long scientific hell of his results being ignored and shunned. So, there are multiple examples like that, and we discuss a handful. This has led Laura to the conclusion we should accept almost all theoretical frameworks, We discuss her ideas about how to implement this, where to draw the line, and much more.



Laura's page at the Center for the Philosophy of Science at the University of Pittsburgh. 



Facing the Fringe.



Garcia's reflections on his troubles: Tilting at the Paper Mills of Academe



0:00 - Intro
3:57 - What is fringe?
10:14 - What makes a theory fringe?
14:31 - Fringe to mainstream
17:23 - Garcia effect
28:17 - Fringe to mainstream: other examples
32:38 - Fringe and consciousness
33:19 - Words meanings change over time
40:24 - Pseudoscience
43:25 - How fringe becomes mainstream
47:19 - More fringe characteristics
50:06 - Pluralism as a solution
54:02 - Progress
1:01:39 - Encyclopedia of theories
1:09:20 - When to reject a theory
1:20:07 - How fringe becomes fringe
1:22:50 - Marginalization
1:27:53 - Recipe for fringe theorist]]>
                </itunes:summary>
                                                                            <itunes:duration>01:39:06</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 178 Eric Shea-Brown: Neural Dynamics and Dimensions]]>
                </title>
                <pubDate>Mon, 13 Nov 2023 20:36:19 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1595727</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-178-eric-shea-brown-neural-dynamics-and-dimensions</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Eric Shea-Brown is a theoretical neuroscientist and principal investigator of the working group on neural dynamics at the University of Washington. In this episode, we talk a lot about dynamics and dimensionality in neural networks... how to think about them, why they matter, how Eric's perspectives have changed through his career. We discuss a handful of his specific research findings about dynamics and dimensionality, like how dimensionality changes when one is performing a task versus when you're just sort of going about your day, what we can say about dynamics just by looking at different structural connection motifs, how different modes of learning can rely on different dimensionalities, and more. We also talk about how he goes about choosing what to work on and how to work on it. You'll hear in our discussion how much credit Eric gives to those surrounding him and those who came before him - he drops tons of references and names, so get ready if you want to follow up on some of the many lines of research he mentions.</p>



<ul>
<li><a href="http://faculty.washington.edu/etsb/">Eric's website</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.nature.com/articles/s41467-021-21696-1">Predictive learning as a network mechanism for extracting low-dimensional latent space representations</a>.</li>



<li><a href="https://www.cell.com/patterns/pdf/S2666-3899(22)00160-X.pdf">A scale-dependent measure of system dimensionality</a>.</li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438823001058">From lazy to rich to exclusive task representations in neural networks and neural codes</a>.</li>



<li><a href="http://arxiv.org/abs/1605.09073">Feedback through graph motifs relates structure and function in complex networks</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:15 - Reflecting on the rise of dynamical systems in neuroscience
11:15 - DST view on macro scale
15:56 - Intuitions
22:07 - Eric's approach
31:13 - Are brains more or less impressive to you now?
38:45 - Why is dimensionality important?
50:03 - High-D in Low-D
54:14 - Dynamical motifs
1:14:56 - Theory for its own sake
1:18:43 - Rich vs. lazy learning
1:22:58 - Latent variables
1:26:58 - What assumptions give you most pause?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Eric Shea-Brown is a theoretical neuroscientist and principal investigator of the working group on neural dynamics at the University of Washington. In this episode, we talk a lot about dynamics and dimensionality in neural networks... how to think about them, why they matter, how Eric's perspectives have changed through his career. We discuss a handful of his specific research findings about dynamics and dimensionality, like how dimensionality changes when one is performing a task versus when you're just sort of going about your day, what we can say about dynamics just by looking at different structural connection motifs, how different modes of learning can rely on different dimensionalities, and more. We also talk about how he goes about choosing what to work on and how to work on it. You'll hear in our discussion how much credit Eric gives to those surrounding him and those who came before him - he drops tons of references and names, so get ready if you want to follow up on some of the many lines of research he mentions.




Eric's website.



Related papers

Predictive learning as a network mechanism for extracting low-dimensional latent space representations.



A scale-dependent measure of system dimensionality.



From lazy to rich to exclusive task representations in neural networks and neural codes.



Feedback through graph motifs relates structure and function in complex networks.






0:00 - Intro
4:15 - Reflecting on the rise of dynamical systems in neuroscience
11:15 - DST view on macro scale
15:56 - Intuitions
22:07 - Eric's approach
31:13 - Are brains more or less impressive to you now?
38:45 - Why is dimensionality important?
50:03 - High-D in Low-D
54:14 - Dynamical motifs
1:14:56 - Theory for its own sake
1:18:43 - Rich vs. lazy learning
1:22:58 - Latent variables
1:26:58 - What assumptions give you most pause?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 178 Eric Shea-Brown: Neural Dynamics and Dimensions]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Eric Shea-Brown is a theoretical neuroscientist and principal investigator of the working group on neural dynamics at the University of Washington. In this episode, we talk a lot about dynamics and dimensionality in neural networks... how to think about them, why they matter, how Eric's perspectives have changed through his career. We discuss a handful of his specific research findings about dynamics and dimensionality, like how dimensionality changes when one is performing a task versus when you're just sort of going about your day, what we can say about dynamics just by looking at different structural connection motifs, how different modes of learning can rely on different dimensionalities, and more. We also talk about how he goes about choosing what to work on and how to work on it. You'll hear in our discussion how much credit Eric gives to those surrounding him and those who came before him - he drops tons of references and names, so get ready if you want to follow up on some of the many lines of research he mentions.</p>



<ul>
<li><a href="http://faculty.washington.edu/etsb/">Eric's website</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.nature.com/articles/s41467-021-21696-1">Predictive learning as a network mechanism for extracting low-dimensional latent space representations</a>.</li>



<li><a href="https://www.cell.com/patterns/pdf/S2666-3899(22)00160-X.pdf">A scale-dependent measure of system dimensionality</a>.</li>



<li><a href="https://www.sciencedirect.com/science/article/pii/S0959438823001058">From lazy to rich to exclusive task representations in neural networks and neural codes</a>.</li>



<li><a href="http://arxiv.org/abs/1605.09073">Feedback through graph motifs relates structure and function in complex networks</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:15 - Reflecting on the rise of dynamical systems in neuroscience
11:15 - DST view on macro scale
15:56 - Intuitions
22:07 - Eric's approach
31:13 - Are brains more or less impressive to you now?
38:45 - Why is dimensionality important?
50:03 - High-D in Low-D
54:14 - Dynamical motifs
1:14:56 - Theory for its own sake
1:18:43 - Rich vs. lazy learning
1:22:58 - Latent variables
1:26:58 - What assumptions give you most pause?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/a689b8a3-350e-4c10-8064-d112691a8a8b-178-Eric-Shea-Brown-public.mp3" length="92823356"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Eric Shea-Brown is a theoretical neuroscientist and principal investigator of the working group on neural dynamics at the University of Washington. In this episode, we talk a lot about dynamics and dimensionality in neural networks... how to think about them, why they matter, how Eric's perspectives have changed through his career. We discuss a handful of his specific research findings about dynamics and dimensionality, like how dimensionality changes when one is performing a task versus when you're just sort of going about your day, what we can say about dynamics just by looking at different structural connection motifs, how different modes of learning can rely on different dimensionalities, and more. We also talk about how he goes about choosing what to work on and how to work on it. You'll hear in our discussion how much credit Eric gives to those surrounding him and those who came before him - he drops tons of references and names, so get ready if you want to follow up on some of the many lines of research he mentions.




Eric's website.



Related papers

Predictive learning as a network mechanism for extracting low-dimensional latent space representations.



A scale-dependent measure of system dimensionality.



From lazy to rich to exclusive task representations in neural networks and neural codes.



Feedback through graph motifs relates structure and function in complex networks.






0:00 - Intro
4:15 - Reflecting on the rise of dynamical systems in neuroscience
11:15 - DST view on macro scale
15:56 - Intuitions
22:07 - Eric's approach
31:13 - Are brains more or less impressive to you now?
38:45 - Why is dimensionality important?
50:03 - High-D in Low-D
54:14 - Dynamical motifs
1:14:56 - Theory for its own sake
1:18:43 - Rich vs. lazy learning
1:22:58 - Latent variables
1:26:58 - What assumptions give you most pause?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:35:31</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 177 Special: Bernstein Workshop Panel]]>
                </title>
                <pubDate>Mon, 30 Oct 2023 01:12:31 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1586035</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-177-special-bernstein-workshop-panel</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p>I was recently invited to moderate a panel at the Annual Bernstein conference - this one was in Berlin, Germany. The panel I moderated was at a satellite workshop at the conference called <a href="https://bernstein-network.de/bernstein-conference/program/satellite-workshops/machine-learning/">How can machine learning be used to generate insights and theories in neuroscience?</a> Below are the panelists. I hope you enjoy the discussion!</p>



<ul>
<li>Program: <a href="https://bernstein-network.de/bernstein-conference/program/satellite-workshops/machine-learning/">How can machine learning be used to generate insights and theories in neuroscience?</a></li>



<li>Panelists:
<ul>
<li>Katrin Franke
<ul>
<li><a href="https://www.eye-tuebingen.de/franke/">Lab website</a>.</li>



<li>Twitter: <a href="https://twitter.com/kfrankelab">@kfrankelab</a>.</li>
</ul>
</li>



<li>Ralf Haefner
<ul>
<li><a href="https://www2.bcs.rochester.edu/sites/haefnerlab/index.html">Haefner lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/haefnerlab">@haefnerlab</a>.</li>
</ul>
</li>



<li>Martin Hebart
<ul>
<li><a href="https://hebartlab.com/">Hebart Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/martin_hebart">@martin_hebart</a>.</li>
</ul>
</li>



<li>Johannes Jaeger
<ul>
<li><a href="http://www.johannesjaeger.eu/">Yogi's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/yoginho">@yoginho</a>.</li>
</ul>
</li>



<li>Fred Wolf
<ul>
<li><a href="https://www.uni-goettingen.de/en/58058.html">Fred's university webpage</a>.</li>
</ul>
</li>
</ul>
</li>
</ul>



<p>Organizers:</p>



<ul>
<li>Alexander Ecker | University of Göttingen, Germany</li>



<li>Fabian Sinz | University of Göttingen, Germany</li>



<li>Mohammad Bashiri, Pavithra Elumalai, Michaela Vystrcilová | University of Göttingen, Germany</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







I was recently invited to moderate a panel at the Annual Bernstein conference - this one was in Berlin, Germany. The panel I moderated was at a satellite workshop at the conference called How can machine learning be used to generate insights and theories in neuroscience? Below are the panelists. I hope you enjoy the discussion!




Program: How can machine learning be used to generate insights and theories in neuroscience?



Panelists:

Katrin Franke

Lab website.



Twitter: @kfrankelab.





Ralf Haefner

Haefner lab.



Twitter: @haefnerlab.





Martin Hebart

Hebart Lab.



Twitter: @martin_hebart.





Johannes Jaeger

Yogi's website.



Twitter: @yoginho.





Fred Wolf

Fred's university webpage.








Organizers:




Alexander Ecker | University of Göttingen, Germany



Fabian Sinz | University of Göttingen, Germany



Mohammad Bashiri, Pavithra Elumalai, Michaela Vystrcilová | University of Göttingen, Germany
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 177 Special: Bernstein Workshop Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p>I was recently invited to moderate a panel at the Annual Bernstein conference - this one was in Berlin, Germany. The panel I moderated was at a satellite workshop at the conference called <a href="https://bernstein-network.de/bernstein-conference/program/satellite-workshops/machine-learning/">How can machine learning be used to generate insights and theories in neuroscience?</a> Below are the panelists. I hope you enjoy the discussion!</p>



<ul>
<li>Program: <a href="https://bernstein-network.de/bernstein-conference/program/satellite-workshops/machine-learning/">How can machine learning be used to generate insights and theories in neuroscience?</a></li>



<li>Panelists:
<ul>
<li>Katrin Franke
<ul>
<li><a href="https://www.eye-tuebingen.de/franke/">Lab website</a>.</li>



<li>Twitter: <a href="https://twitter.com/kfrankelab">@kfrankelab</a>.</li>
</ul>
</li>



<li>Ralf Haefner
<ul>
<li><a href="https://www2.bcs.rochester.edu/sites/haefnerlab/index.html">Haefner lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/haefnerlab">@haefnerlab</a>.</li>
</ul>
</li>



<li>Martin Hebart
<ul>
<li><a href="https://hebartlab.com/">Hebart Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/martin_hebart">@martin_hebart</a>.</li>
</ul>
</li>



<li>Johannes Jaeger
<ul>
<li><a href="http://www.johannesjaeger.eu/">Yogi's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/yoginho">@yoginho</a>.</li>
</ul>
</li>



<li>Fred Wolf
<ul>
<li><a href="https://www.uni-goettingen.de/en/58058.html">Fred's university webpage</a>.</li>
</ul>
</li>
</ul>
</li>
</ul>



<p>Organizers:</p>



<ul>
<li>Alexander Ecker | University of Göttingen, Germany</li>



<li>Fabian Sinz | University of Göttingen, Germany</li>



<li>Mohammad Bashiri, Pavithra Elumalai, Michaela Vystrcilová | University of Göttingen, Germany</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/83c7dc07-6a1c-4cdb-9976-d0f38bc6648f-178-Bernstein.mp3" length="71223961"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







I was recently invited to moderate a panel at the Annual Bernstein conference - this one was in Berlin, Germany. The panel I moderated was at a satellite workshop at the conference called How can machine learning be used to generate insights and theories in neuroscience? Below are the panelists. I hope you enjoy the discussion!




Program: How can machine learning be used to generate insights and theories in neuroscience?



Panelists:

Katrin Franke

Lab website.



Twitter: @kfrankelab.





Ralf Haefner

Haefner lab.



Twitter: @haefnerlab.





Martin Hebart

Hebart Lab.



Twitter: @martin_hebart.





Johannes Jaeger

Yogi's website.



Twitter: @yoginho.





Fred Wolf

Fred's university webpage.








Organizers:




Alexander Ecker | University of Göttingen, Germany



Fabian Sinz | University of Göttingen, Germany



Mohammad Bashiri, Pavithra Elumalai, Michaela Vystrcilová | University of Göttingen, Germany
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:13:54</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 176 David Poeppel Returns]]>
                </title>
                <pubDate>Sat, 14 Oct 2023 16:47:13 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1574784</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-176-david-poeppel-returns</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>David runs <a href="http://psych.nyu.edu/clash/poeppellab/">his lab at NYU</a>, where they study auditory cognition, speech perception, language, and music. On the heels of the <a href="https://braininspired.co/podcast/172/">episode with David Glanzman</a>, we discuss the ongoing mystery regarding how memory works, how to study and think about brains and minds, and the reemergence (perhaps) of the language of thought hypothesis.</p>



<p>David has been on the podcast a few times... <a href="https://braininspired.co/podcast/46/">once by himself</a>, and <a href="https://braininspired.co/podcast/84/">again with Gyorgy Buzsaki</a>.</p>



<ul>
<li><a href="http://psych.nyu.edu/clash/poeppellab/">Poeppel lab</a></li>



<li>Twitter: <a href="https://twitter.com/davidpoeppel">@davidpoeppel</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(22)00206-6?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS1364661322002066%3Fshowall%3Dtrue">We don’t know how the brain stores anything, let alone words</a>.</li>



<li><a href="https://arxiv.org/pdf/2210.01869.pdf">Memory in humans and deep language models: Linking hypotheses for model augmentation.</a></li>



<li><a href="https://www.sciencedirect.com/science/article/abs/pii/S1364661323001936">The neural ingredients for a language of thought are available.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
11:17 - Across levels
14:58 - Nature of memory
24:12 - Using the right tools for the right question
35:46 - LLMs, what they need, how they've shaped David's thoughts
44:55 - Across levels
54:07 - Speed of progress
1:02:21 - Neuroethology and mental illness - patreon
1:24:42 - Language of Thought</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











David runs his lab at NYU, where they study auditory cognition, speech perception, language, and music. On the heels of the episode with David Glanzman, we discuss the ongoing mystery regarding how memory works, how to study and think about brains and minds, and the reemergence (perhaps) of the language of thought hypothesis.



David has been on the podcast a few times... once by himself, and again with Gyorgy Buzsaki.




Poeppel lab



Twitter: @davidpoeppel.



Related papers

We don’t know how the brain stores anything, let alone words.



Memory in humans and deep language models: Linking hypotheses for model augmentation.



The neural ingredients for a language of thought are available.






0:00 - Intro
11:17 - Across levels
14:58 - Nature of memory
24:12 - Using the right tools for the right question
35:46 - LLMs, what they need, how they've shaped David's thoughts
44:55 - Across levels
54:07 - Speed of progress
1:02:21 - Neuroethology and mental illness - patreon
1:24:42 - Language of Thought]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 176 David Poeppel Returns]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>David runs <a href="http://psych.nyu.edu/clash/poeppellab/">his lab at NYU</a>, where they study auditory cognition, speech perception, language, and music. On the heels of the <a href="https://braininspired.co/podcast/172/">episode with David Glanzman</a>, we discuss the ongoing mystery regarding how memory works, how to study and think about brains and minds, and the reemergence (perhaps) of the language of thought hypothesis.</p>



<p>David has been on the podcast a few times... <a href="https://braininspired.co/podcast/46/">once by himself</a>, and <a href="https://braininspired.co/podcast/84/">again with Gyorgy Buzsaki</a>.</p>



<ul>
<li><a href="http://psych.nyu.edu/clash/poeppellab/">Poeppel lab</a></li>



<li>Twitter: <a href="https://twitter.com/davidpoeppel">@davidpoeppel</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(22)00206-6?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS1364661322002066%3Fshowall%3Dtrue">We don’t know how the brain stores anything, let alone words</a>.</li>



<li><a href="https://arxiv.org/pdf/2210.01869.pdf">Memory in humans and deep language models: Linking hypotheses for model augmentation.</a></li>



<li><a href="https://www.sciencedirect.com/science/article/abs/pii/S1364661323001936">The neural ingredients for a language of thought are available.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
11:17 - Across levels
14:58 - Nature of memory
24:12 - Using the right tools for the right question
35:46 - LLMs, what they need, how they've shaped David's thoughts
44:55 - Across levels
54:07 - Speed of progress
1:02:21 - Neuroethology and mental illness - patreon
1:24:42 - Language of Thought</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/67345006-b228-423a-844e-6ff8a596a0d8-176-David-Poeppel-public.mp3" length="81395571"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











David runs his lab at NYU, where they study auditory cognition, speech perception, language, and music. On the heels of the episode with David Glanzman, we discuss the ongoing mystery regarding how memory works, how to study and think about brains and minds, and the reemergence (perhaps) of the language of thought hypothesis.



David has been on the podcast a few times... once by himself, and again with Gyorgy Buzsaki.




Poeppel lab



Twitter: @davidpoeppel.



Related papers

We don’t know how the brain stores anything, let alone words.



Memory in humans and deep language models: Linking hypotheses for model augmentation.



The neural ingredients for a language of thought are available.






0:00 - Intro
11:17 - Across levels
14:58 - Nature of memory
24:12 - Using the right tools for the right question
35:46 - LLMs, what they need, how they've shaped David's thoughts
44:55 - Across levels
54:07 - Speed of progress
1:02:21 - Neuroethology and mental illness - patreon
1:24:42 - Language of Thought]]>
                </itunes:summary>
                                                                            <itunes:duration>01:23:57</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 175 Kevin Mitchell: Free Agents]]>
                </title>
                <pubDate>Tue, 03 Oct 2023 10:37:16 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1567343</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-175-kevin-mitchell-free-agents</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Kevin Mitchell is professor of genetics at Trinity College Dublin. He's <a href="https://braininspired.co/podcast/111/">been on the podcast before</a>, and we talked a little about his previous book, Innate – How the Wiring of Our Brains Shapes Who We Are. He's back today to discuss his new book <a href="https://amzn.to/3thGq9V">Free Agents: How Evolution Gave Us Free Will</a>. The book is written very well and guides the reader through a wide range of scientific knowledge and reasoning that undergirds Kevin's main take home: our free will comes from the fact that we are biological organisms, biological organisms have agency, and as that agency evolved to become more complex and layered, so does our ability to exert free will. We touch on a handful of topics in the book, like the idea of agency, how it came about at the origin of life, and how the complexity of kinds of agency, the richness of our agency, evolved as organisms became more complex.</p>





<p>We also discuss Kevin's reliance on the indeterminacy of the universe to tell his story, the underlying randomness at fundamental levels of physics. Although indeterminacy isn't necessary for ongoing free will, it is responsible for the capacity for free will to exist in the first place. We discuss the brain's ability to harness its own randomness when needed, creativity, whether and how it's possible to create something new, artificial free will, and lots more.</p>



<ul>
<li><a href="https://www.kjmitchell.com/">Kevin's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/WiringTheBrain?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">@WiringtheBrain</a></li>



<li>Book: <a href="https://amzn.to/3thGq9V">Free Agents: How Evolution Gave Us Free Will</a></li>
</ul>



<p>4:27 - From Innate to Free Agents
9:14 - Thinking of the whole organism
15:11 - Who the book is for
19:49 - What bothers Kevin
27:00 - Indeterminacy
30:08 - How it all began
33:08 - How indeterminacy helps
43:58 - Libet's free will experiments
50:36 - Creativity
59:16 - Selves, subjective experience, agency, and free will
1:10:04 - Levels of agency and free will
1:20:38 - How much free will can we have?
1:28:03 - Hierarchy of mind constraints
1:36:39 - Artificial agents and free will
1:42:57 - Next book?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Kevin Mitchell is professor of genetics at Trinity College Dublin. He's been on the podcast before, and we talked a little about his previous book, Innate – How the Wiring of Our Brains Shapes Who We Are. He's back today to discuss his new book Free Agents: How Evolution Gave Us Free Will. The book is written very well and guides the reader through a wide range of scientific knowledge and reasoning that undergirds Kevin's main take home: our free will comes from the fact that we are biological organisms, biological organisms have agency, and as that agency evolved to become more complex and layered, so does our ability to exert free will. We touch on a handful of topics in the book, like the idea of agency, how it came about at the origin of life, and how the complexity of kinds of agency, the richness of our agency, evolved as organisms became more complex.





We also discuss Kevin's reliance on the indeterminacy of the universe to tell his story, the underlying randomness at fundamental levels of physics. Although indeterminacy isn't necessary for ongoing free will, it is responsible for the capacity for free will to exist in the first place. We discuss the brain's ability to harness its own randomness when needed, creativity, whether and how it's possible to create something new, artificial free will, and lots more.




Kevin's website.



Twitter: @WiringtheBrain



Book: Free Agents: How Evolution Gave Us Free Will




4:27 - From Innate to Free Agents
9:14 - Thinking of the whole organism
15:11 - Who the book is for
19:49 - What bothers Kevin
27:00 - Indeterminacy
30:08 - How it all began
33:08 - How indeterminacy helps
43:58 - Libet's free will experiments
50:36 - Creativity
59:16 - Selves, subjective experience, agency, and free will
1:10:04 - Levels of agency and free will
1:20:38 - How much free will can we have?
1:28:03 - Hierarchy of mind constraints
1:36:39 - Artificial agents and free will
1:42:57 - Next book?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 175 Kevin Mitchell: Free Agents]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Kevin Mitchell is professor of genetics at Trinity College Dublin. He's <a href="https://braininspired.co/podcast/111/">been on the podcast before</a>, and we talked a little about his previous book, Innate – How the Wiring of Our Brains Shapes Who We Are. He's back today to discuss his new book <a href="https://amzn.to/3thGq9V">Free Agents: How Evolution Gave Us Free Will</a>. The book is written very well and guides the reader through a wide range of scientific knowledge and reasoning that undergirds Kevin's main take home: our free will comes from the fact that we are biological organisms, biological organisms have agency, and as that agency evolved to become more complex and layered, so does our ability to exert free will. We touch on a handful of topics in the book, like the idea of agency, how it came about at the origin of life, and how the complexity of kinds of agency, the richness of our agency, evolved as organisms became more complex.</p>





<p>We also discuss Kevin's reliance on the indeterminacy of the universe to tell his story, the underlying randomness at fundamental levels of physics. Although indeterminacy isn't necessary for ongoing free will, it is responsible for the capacity for free will to exist in the first place. We discuss the brain's ability to harness its own randomness when needed, creativity, whether and how it's possible to create something new, artificial free will, and lots more.</p>



<ul>
<li><a href="https://www.kjmitchell.com/">Kevin's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/WiringTheBrain?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">@WiringtheBrain</a></li>



<li>Book: <a href="https://amzn.to/3thGq9V">Free Agents: How Evolution Gave Us Free Will</a></li>
</ul>



<p>4:27 - From Innate to Free Agents
9:14 - Thinking of the whole organism
15:11 - Who the book is for
19:49 - What bothers Kevin
27:00 - Indeterminacy
30:08 - How it all began
33:08 - How indeterminacy helps
43:58 - Libet's free will experiments
50:36 - Creativity
59:16 - Selves, subjective experience, agency, and free will
1:10:04 - Levels of agency and free will
1:20:38 - How much free will can we have?
1:28:03 - Hierarchy of mind constraints
1:36:39 - Artificial agents and free will
1:42:57 - Next book?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ee881267-f142-43b3-a49a-76f990612136-175-Kevin-Mitchell-public.mp3" length="103521752"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Kevin Mitchell is professor of genetics at Trinity College Dublin. He's been on the podcast before, and we talked a little about his previous book, Innate – How the Wiring of Our Brains Shapes Who We Are. He's back today to discuss his new book Free Agents: How Evolution Gave Us Free Will. The book is written very well and guides the reader through a wide range of scientific knowledge and reasoning that undergirds Kevin's main take home: our free will comes from the fact that we are biological organisms, biological organisms have agency, and as that agency evolved to become more complex and layered, so does our ability to exert free will. We touch on a handful of topics in the book, like the idea of agency, how it came about at the origin of life, and how the complexity of kinds of agency, the richness of our agency, evolved as organisms became more complex.





We also discuss Kevin's reliance on the indeterminacy of the universe to tell his story, the underlying randomness at fundamental levels of physics. Although indeterminacy isn't necessary for ongoing free will, it is responsible for the capacity for free will to exist in the first place. We discuss the brain's ability to harness its own randomness when needed, creativity, whether and how it's possible to create something new, artificial free will, and lots more.




Kevin's website.



Twitter: @WiringtheBrain



Book: Free Agents: How Evolution Gave Us Free Will




4:27 - From Innate to Free Agents
9:14 - Thinking of the whole organism
15:11 - Who the book is for
19:49 - What bothers Kevin
27:00 - Indeterminacy
30:08 - How it all began
33:08 - How indeterminacy helps
43:58 - Libet's free will experiments
50:36 - Creativity
59:16 - Selves, subjective experience, agency, and free will
1:10:04 - Levels of agency and free will
1:20:38 - How much free will can we have?
1:28:03 - Hierarchy of mind constraints
1:36:39 - Artificial agents and free will
1:42:57 - Next book?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:46:32</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 174 Alicia Juarrero: Context Changes Everything]]>
                </title>
                <pubDate>Wed, 13 Sep 2023 13:06:24 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1555332</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-174-alicia-juarrero-context-changes-everything</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Alicia Juarrero is a philosopher and has been interested in complexity since before it was cool.</p>





<p>In this episode, we discuss many of the topics and ideas in her new book, <a href="https://mitpress.mit.edu/9780262545662/context-changes-everything/">Context Changes Everything: How Constraints Create Coherence</a>, which makes the thorough case that constraints should be given way more attention when trying to understand complex systems like brains and minds - how they're organized, how they operate, how they're formed and maintained, and so on. Modern science, thanks in large part to the success of physics, focuses on a single kind of causation - the kind involved when one billiard ball strikes another billiard ball. But that kind of causation neglects what Alicia argues are the most important features of complex systems: the constraints that shape the dynamics and possibility spaces of systems. Much of Alicia's book describes the wide range of types of constraints we should be paying attention to, and how they interact and mutually influence each other. I highly recommend the book, and you may want to read it before, during, and after our conversation. That's partly because, if you're like me, the concepts she discusses still aren't comfortable to think about the way we're used to thinking about how things interact. Thinking across levels of organization turns out to be hard. You might also want her book handy because, hang on to your hats, we jump around a lot among those concepts. Context Changes Everything comes about 25 years after her previous classic, Dynamics In Action, which we also discuss and which I also recommend if you want more of a primer to her newer more expansive work. Alicia's work touches on all things complex, from self-organizing systems like whirlpools, to ecologies, businesses, societies, and of course minds and brains.</p>





<ul>
<li>Book:
<ul>
<li><a href="https://mitpress.mit.edu/9780262545662/context-changes-everything/">Context Changes Everything: How Constraints Create Coherence</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:37 - 25 years thinking about constraints
8:45 - Dynamics in Action and eliminativism
13:08 - Efficient and other kinds of causation
19:04 - Complexity via context independent and dependent constraints
25:53 - Enabling and limiting constraints
30:55 - Across scales
36:32 - Temporal constraints
42:58 - A constraint cookbook?
52:12 - Constraints in a mechanistic worldview
53:42 - How to explain using constraints
56:22 - Concepts and multiple realizability
59:00 - Kevin Mitchell question
1:08:07 - Mac Shine Question
1:19:07 - 4E
1:21:38 - Dimensionality across levels
1:27:26 - AI and constraints
1:33:08 - AI and life</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Alicia Juarrero is a philosopher and has been interested in complexity since before it was cool.





In this episode, we discuss many of the topics and ideas in her new book, Context Changes Everything: How Constraints Create Coherence, which makes the thorough case that constraints should be given way more attention when trying to understand complex systems like brains and minds - how they're organized, how they operate, how they're formed and maintained, and so on. Modern science, thanks in large part to the success of physics, focuses on a single kind of causation - the kind involved when one billiard ball strikes another billiard ball. But that kind of causation neglects what Alicia argues are the most important features of complex systems: the constraints that shape the dynamics and possibility spaces of systems. Much of Alicia's book describes the wide range of types of constraints we should be paying attention to, and how they interact and mutually influence each other. I highly recommend the book, and you may want to read it before, during, and after our conversation. That's partly because, if you're like me, the concepts she discusses still aren't comfortable to think about the way we're used to thinking about how things interact. Thinking across levels of organization turns out to be hard. You might also want her book handy because, hang on to your hats, we jump around a lot among those concepts. Context Changes Everything comes about 25 years after her previous classic, Dynamics In Action, which we also discuss and which I also recommend if you want more of a primer to her newer more expansive work. Alicia's work touches on all things complex, from self-organizing systems like whirlpools, to ecologies, businesses, societies, and of course minds and brains.






Book:

Context Changes Everything: How Constraints Create Coherence






0:00 - Intro
3:37 - 25 years thinking about constraints
8:45 - Dynamics in Action and eliminativism
13:08 - Efficient and other kinds of causation
19:04 - Complexity via context independent and dependent constraints
25:53 - Enabling and limiting constraints
30:55 - Across scales
36:32 - Temporal constraints
42:58 - A constraint cookbook?
52:12 - Constraints in a mechanistic worldview
53:42 - How to explain using constraints
56:22 - Concepts and multiple realizability
59:00 - Kevin Mitchell question
1:08:07 - Mac Shine Question
1:19:07 - 4E
1:21:38 - Dimensionality across levels
1:27:26 - AI and constraints
1:33:08 - AI and life]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 174 Alicia Juarrero: Context Changes Everything]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Alicia Juarrero is a philosopher and has been interested in complexity since before it was cool.</p>





<p>In this episode, we discuss many of the topics and ideas in her new book, <a href="https://mitpress.mit.edu/9780262545662/context-changes-everything/">Context Changes Everything: How Constraints Create Coherence</a>, which makes the thorough case that constraints should be given way more attention when trying to understand complex systems like brains and minds - how they're organized, how they operate, how they're formed and maintained, and so on. Modern science, thanks in large part to the success of physics, focuses on a single kind of causation - the kind involved when one billiard ball strikes another billiard ball. But that kind of causation neglects what Alicia argues are the most important features of complex systems: the constraints that shape the dynamics and possibility spaces of systems. Much of Alicia's book describes the wide range of types of constraints we should be paying attention to, and how they interact and mutually influence each other. I highly recommend the book, and you may want to read it before, during, and after our conversation. That's partly because, if you're like me, the concepts she discusses still aren't comfortable to think about the way we're used to thinking about how things interact. Thinking across levels of organization turns out to be hard. You might also want her book handy because, hang on to your hats, we jump around a lot among those concepts. Context Changes Everything comes about 25 years after her previous classic, Dynamics In Action, which we also discuss and which I also recommend if you want more of a primer to her newer more expansive work. Alicia's work touches on all things complex, from self-organizing systems like whirlpools, to ecologies, businesses, societies, and of course minds and brains.</p>





<ul>
<li>Book:
<ul>
<li><a href="https://mitpress.mit.edu/9780262545662/context-changes-everything/">Context Changes Everything: How Constraints Create Coherence</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:37 - 25 years thinking about constraints
8:45 - Dynamics in Action and eliminativism
13:08 - Efficient and other kinds of causation
19:04 - Complexity via context independent and dependent constraints
25:53 - Enabling and limiting constraints
30:55 - Across scales
36:32 - Temporal constraints
42:58 - A constraint cookbook?
52:12 - Constraints in a mechanistic worldview
53:42 - How to explain using constraints
56:22 - Concepts and multiple realizability
59:00 - Kevin Mitchell question
1:08:07 - Mac Shine Question
1:19:07 - 4E
1:21:38 - Dimensionality across levels
1:27:26 - AI and constraints
1:33:08 - AI and life</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/e46afa29-214d-4cc8-9940-51a53a154bdb-174-Alicia-Juarrero-public.mp3" length="102292408"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Alicia Juarrero is a philosopher and has been interested in complexity since before it was cool.





In this episode, we discuss many of the topics and ideas in her new book, Context Changes Everything: How Constraints Create Coherence, which makes the thorough case that constraints should be given way more attention when trying to understand complex systems like brains and minds - how they're organized, how they operate, how they're formed and maintained, and so on. Modern science, thanks in large part to the success of physics, focuses on a single kind of causation - the kind involved when one billiard ball strikes another billiard ball. But that kind of causation neglects what Alicia argues are the most important features of complex systems: the constraints that shape the dynamics and possibility spaces of systems. Much of Alicia's book describes the wide range of types of constraints we should be paying attention to, and how they interact and mutually influence each other. I highly recommend the book, and you may want to read it before, during, and after our conversation. That's partly because, if you're like me, the concepts she discusses still aren't comfortable to think about the way we're used to thinking about how things interact. Thinking across levels of organization turns out to be hard. You might also want her book handy because, hang on to your hats, we jump around a lot among those concepts. Context Changes Everything comes about 25 years after her previous classic, Dynamics In Action, which we also discuss and which I also recommend if you want more of a primer to her newer more expansive work. Alicia's work touches on all things complex, from self-organizing systems like whirlpools, to ecologies, businesses, societies, and of course minds and brains.






Book:

Context Changes Everything: How Constraints Create Coherence






0:00 - Intro
3:37 - 25 years thinking about constraints
8:45 - Dynamics in Action and eliminativism
13:08 - Efficient and other kinds of causation
19:04 - Complexity via context independent and dependent constraints
25:53 - Enabling and limiting constraints
30:55 - Across scales
36:32 - Temporal constraints
42:58 - A constraint cookbook?
52:12 - Constraints in a mechanistic worldview
53:42 - How to explain using constraints
56:22 - Concepts and multiple realizability
59:00 - Kevin Mitchell question
1:08:07 - Mac Shine Question
1:19:07 - 4E
1:21:38 - Dimensionality across levels
1:27:26 - AI and constraints
1:33:08 - AI and life]]>
                </itunes:summary>
                                                                            <itunes:duration>01:45:00</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 173 Justin Wood: Origins of Visual Intelligence]]>
                </title>
                <pubDate>Wed, 30 Aug 2023 13:30:47 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1545965</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-173-justin-wood-origins-of-visual-intelligence</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>In the intro, I mention the Bernstein conference workshop I'll participate in, called <a href="https://bernstein-network.de/bernstein-conference/program/satellite-workshops/machine-learning/">How can machine learning be used to generate insights and theories in neuroscience?</a>. Follow that link to learn more, and <a href="https://bernstein-network.de/bernstein-conference/registration/">register for the conference here</a>. Hope to see you there in late September in Berlin!</p>





<p>Justin Wood runs the Wood Lab at Indiana University, and his lab's tagline is "building newborn minds in virtual worlds." In this episode, we discuss his work comparing the visual cognition of newborn chicks and AI models. He uses a controlled-rearing technique with natural chicks, whereby the chicks are raised from birth in completely controlled visual environments. That way, Justin can present designed visual stimuli to test what kinds of visual abilities chicks have or can immediately learn. Then he can build models and AI agents that are trained on the same data as the newborn chicks. The goal is to use the models to better understand natural visual intelligence, and use what we know about natural visual intelligence to help build systems that better emulate biological organisms. We discuss some of the visual abilities of the chicks and what he's found using convolutional neural networks. Beyond vision, we discuss his work studying the development of collective behavior, which compares chicks to a model that uses CNNs, reinforcement learning, and an intrinsic curiosity reward function. All of this informs the age-old nature (nativist) vs. nurture (empiricist) debates, which Justin believes should give way to embrace both nature and nurture.</p>



<ul>
<li><a href="http://buildingamind.com/">Wood lab</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://arxiv.org/abs/2112.06106">Controlled-rearing studies of newborn chicks and deep neural networks</a>.</li>



<li><a href="https://arxiv.org/abs/2111.03796">Development of collective behavior in newborn artificial agents</a>.</li>



<li><a href="https://arxiv.org/abs/2306.05582">A newborn embodied Turing test for view-invariant object recognition</a>.</li>
</ul>
</li>



<li>Justin mentions these papers:
<ul>
<li><a href="https://www.cns.nyu.edu/~tony/vns/readings/dicarlo-cox-2007.pdf">Untangling invariant object recognition (Dicarlo &amp; Cox 2007)</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:39 - Origins of Justin's current research
11:17 - Controlled rearing approach
21:52 - Comparing newborns and AI models
24:11 - Nativism vs. empiricism
28:15 - CNNs and early visual cognition
29:35 - Smoothness and slowness
50:05 - Early biological development
53:27 - Naturalistic vs. highly controlled
56:30 - Collective behavior in animals and machines
1:02:34 - Curiosity and critical periods
1:09:05 - Controlled rearing vs. other developmental studies
1:13:25 - Breaking natural rules
1:16:33 - Deep RL collective behavior
1:23:16 - Bottom-up and top-down</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









In the intro, I mention the Bernstein conference workshop I'll participate in, called How can machine learning be used to generate insights and theories in neuroscience?. Follow that link to learn more, and register for the conference here. Hope to see you there in late September in Berlin!





Justin Wood runs the Wood Lab at Indiana University, and his lab's tagline is "building newborn minds in virtual worlds." In this episode, we discuss his work comparing the visual cognition of newborn chicks and AI models. He uses a controlled-rearing technique with natural chicks, whereby the chicks are raised from birth in completely controlled visual environments. That way, Justin can present designed visual stimuli to test what kinds of visual abilities chicks have or can immediately learn. Then he can build models and AI agents that are trained on the same data as the newborn chicks. The goal is to use the models to better understand natural visual intelligence, and use what we know about natural visual intelligence to help build systems that better emulate biological organisms. We discuss some of the visual abilities of the chicks and what he's found using convolutional neural networks. Beyond vision, we discuss his work studying the development of collective behavior, which compares chicks to a model that uses CNNs, reinforcement learning, and an intrinsic curiosity reward function. All of this informs the age-old nature (nativist) vs. nurture (empiricist) debates, which Justin believes should give way to embrace both nature and nurture.




Wood lab.



Related papers:

Controlled-rearing studies of newborn chicks and deep neural networks.



Development of collective behavior in newborn artificial agents.



A newborn embodied Turing test for view-invariant object recognition.





Justin mentions these papers:

Untangling invariant object recognition (Dicarlo & Cox 2007)






0:00 - Intro
5:39 - Origins of Justin's current research
11:17 - Controlled rearing approach
21:52 - Comparing newborns and AI models
24:11 - Nativism vs. empiricism
28:15 - CNNs and early visual cognition
29:35 - Smoothness and slowness
50:05 - Early biological development
53:27 - Naturalistic vs. highly controlled
56:30 - Collective behavior in animals and machines
1:02:34 - Curiosity and critical periods
1:09:05 - Controlled rearing vs. other developmental studies
1:13:25 - Breaking natural rules
1:16:33 - Deep RL collective behavior
1:23:16 - Bottom-up and top-down]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 173 Justin Wood: Origins of Visual Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>In the intro, I mention the Bernstein conference workshop I'll participate in, called <a href="https://bernstein-network.de/bernstein-conference/program/satellite-workshops/machine-learning/">How can machine learning be used to generate insights and theories in neuroscience?</a>. Follow that link to learn more, and <a href="https://bernstein-network.de/bernstein-conference/registration/">register for the conference here</a>. Hope to see you there in late September in Berlin!</p>





<p>Justin Wood runs the Wood Lab at Indiana University, and his lab's tagline is "building newborn minds in virtual worlds." In this episode, we discuss his work comparing the visual cognition of newborn chicks and AI models. He uses a controlled-rearing technique with natural chicks, whereby the chicks are raised from birth in completely controlled visual environments. That way, Justin can present designed visual stimuli to test what kinds of visual abilities chicks have or can immediately learn. Then he can build models and AI agents that are trained on the same data as the newborn chicks. The goal is to use the models to better understand natural visual intelligence, and use what we know about natural visual intelligence to help build systems that better emulate biological organisms. We discuss some of the visual abilities of the chicks and what he's found using convolutional neural networks. Beyond vision, we discuss his work studying the development of collective behavior, which compares chicks to a model that uses CNNs, reinforcement learning, and an intrinsic curiosity reward function. All of this informs the age-old nature (nativist) vs. nurture (empiricist) debates, which Justin believes should give way to embrace both nature and nurture.</p>



<ul>
<li><a href="http://buildingamind.com/">Wood lab</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://arxiv.org/abs/2112.06106">Controlled-rearing studies of newborn chicks and deep neural networks</a>.</li>



<li><a href="https://arxiv.org/abs/2111.03796">Development of collective behavior in newborn artificial agents</a>.</li>



<li><a href="https://arxiv.org/abs/2306.05582">A newborn embodied Turing test for view-invariant object recognition</a>.</li>
</ul>
</li>



<li>Justin mentions these papers:
<ul>
<li><a href="https://www.cns.nyu.edu/~tony/vns/readings/dicarlo-cox-2007.pdf">Untangling invariant object recognition (Dicarlo &amp; Cox 2007)</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:39 - Origins of Justin's current research
11:17 - Controlled rearing approach
21:52 - Comparing newborns and AI models
24:11 - Nativism vs. empiricism
28:15 - CNNs and early visual cognition
29:35 - Smoothness and slowness
50:05 - Early biological development
53:27 - Naturalistic vs. highly controlled
56:30 - Collective behavior in animals and machines
1:02:34 - Curiosity and critical periods
1:09:05 - Controlled rearing vs. other developmental studies
1:13:25 - Breaking natural rules
1:16:33 - Deep RL collective behavior
1:23:16 - Bottom-up and top-down</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2b1c4eee-b8f6-429a-aa5b-ad58d7d34578-173-Justin-Wood-public.mp3" length="93047261"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









In the intro, I mention the Bernstein conference workshop I'll participate in, called How can machine learning be used to generate insights and theories in neuroscience?. Follow that link to learn more, and register for the conference here. Hope to see you there in late September in Berlin!





Justin Wood runs the Wood Lab at Indiana University, and his lab's tagline is "building newborn minds in virtual worlds." In this episode, we discuss his work comparing the visual cognition of newborn chicks and AI models. He uses a controlled-rearing technique with natural chicks, whereby the chicks are raised from birth in completely controlled visual environments. That way, Justin can present designed visual stimuli to test what kinds of visual abilities chicks have or can immediately learn. Then he can build models and AI agents that are trained on the same data as the newborn chicks. The goal is to use the models to better understand natural visual intelligence, and use what we know about natural visual intelligence to help build systems that better emulate biological organisms. We discuss some of the visual abilities of the chicks and what he's found using convolutional neural networks. Beyond vision, we discuss his work studying the development of collective behavior, which compares chicks to a model that uses CNNs, reinforcement learning, and an intrinsic curiosity reward function. All of this informs the age-old nature (nativist) vs. nurture (empiricist) debates, which Justin believes should give way to embrace both nature and nurture.




Wood lab.



Related papers:

Controlled-rearing studies of newborn chicks and deep neural networks.



Development of collective behavior in newborn artificial agents.



A newborn embodied Turing test for view-invariant object recognition.





Justin mentions these papers:

Untangling invariant object recognition (Dicarlo & Cox 2007)






0:00 - Intro
5:39 - Origins of Justin's current research
11:17 - Controlled rearing approach
21:52 - Comparing newborns and AI models
24:11 - Nativism vs. empiricism
28:15 - CNNs and early visual cognition
29:35 - Smoothness and slowness
50:05 - Early biological development
53:27 - Naturalistic vs. highly controlled
56:30 - Collective behavior in animals and machines
1:02:34 - Curiosity and critical periods
1:09:05 - Controlled rearing vs. other developmental studies
1:13:25 - Breaking natural rules
1:16:33 - Deep RL collective behavior
1:23:16 - Bottom-up and top-down]]>
                </itunes:summary>
                                                                            <itunes:duration>01:35:45</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 172 David Glanzman: Memory All The Way Down]]>
                </title>
                <pubDate>Mon, 07 Aug 2023 10:46:27 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1532285</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-172-david-glanzman-memory-all-the-way-down</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>David runs his lab at UCLA where he's also a distinguished professor.  David used to believe what is currently the mainstream view, that our memories are stored in our synapses, those connections between our neurons.  So as we learn, the synaptic connections strengthen and weaken until they're just right, and that serves to preserve the memory. That's been the dominant view in neuroscience for decades, and is the fundamental principle that underlies basically all of deep learning in AI. But because of his own and others' experiments, which he describes in this episode, David has come to the conclusion that memory must be stored not at the synapse, but in the nucleus of neurons, likely by some epigenetic mechanism mediated by RNA molecules. If this sounds familiar, I had Randy Gallistel on the podcast on episode 126 to discuss similar ideas, and David discusses where he and Randy differ in their thoughts. This episode starts out pretty technical as David describes the series of experiments that changed his mind, but after that we broaden our discussion to a lot of the surrounding issues regarding whether his story about memory is true. And we discuss meta-issues like how old discarded ideas in science often find their way back, what it's like studying a non-mainstream topic, including challenges trying to get funded for it, and so on.</p>







<ul>
<li><a href="https://www.ibp.ucla.edu/faculty/david-glanzman/">David's Faculty Page</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.sciencedirect.com/science/article/abs/pii/S0006291X21007518">The central importance of nuclear mechanisms in the storage of memory</a>.</li>



<li>David mentions Arc and virus-like transmission:
<ul>
<li><a href="https://www.cell.com/cell/fulltext/S0092-8674(17)31504-0">The Neuronal Gene Arc Encodes a Repurposed Retrotransposon Gag Protein that Mediates Intercellular RNA Transfer</a>.</li>



<li><a href="https://pubmed.ncbi.nlm.nih.gov/31953526/">Structure of an Arc-ane virus-like capsid</a>.</li>
</ul>
</li>
</ul>
</li>



<li>David mentions many of the ideas from the <a href="https://2023symposium.ibp.ucla.edu/">Pushing the Boundaries: Neuroscience, Cognition, and Life</a>  Symposium.</li>



<li>Related episodes:
<ul>
<li><a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a></li>



<li><a href="https://braininspired.co/podcast/127/">BI 127 Tomás Ryan: Memory, Instinct, and Forgetting</a></li>
</ul>
</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











David runs his lab at UCLA where he's also a distinguished professor.  David used to believe what is currently the mainstream view, that our memories are stored in our synapses, those connections between our neurons.  So as we learn, the synaptic connections strengthen and weaken until they're just right, and that serves to preserve the memory. That's been the dominant view in neuroscience for decades, and is the fundamental principle that underlies basically all of deep learning in AI. But because of his own and others' experiments, which he describes in this episode, David has come to the conclusion that memory must be stored not at the synapse, but in the nucleus of neurons, likely by some epigenetic mechanism mediated by RNA molecules. If this sounds familiar, I had Randy Gallistel on the podcast on episode 126 to discuss similar ideas, and David discusses where he and Randy differ in their thoughts. This episode starts out pretty technical as David describes the series of experiments that changed his mind, but after that we broaden our discussion to a lot of the surrounding issues regarding whether his story about memory is true. And we discuss meta-issues like how old discarded ideas in science often find their way back, what it's like studying a non-mainstream topic, including challenges trying to get funded for it, and so on.








David's Faculty Page.



Related papers

The central importance of nuclear mechanisms in the storage of memory.



David mentions Arc and virus-like transmission:

The Neuronal Gene Arc Encodes a Repurposed Retrotransposon Gag Protein that Mediates Intercellular RNA Transfer.



Structure of an Arc-ane virus-like capsid.







David mentions many of the ideas from the Pushing the Boundaries: Neuroscience, Cognition, and Life  Symposium.



Related episodes:

BI 126 Randy Gallistel: Where Is the Engram?



BI 127 Tomás Ryan: Memory, Instinct, and Forgetting


]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 172 David Glanzman: Memory All The Way Down]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>David runs his lab at UCLA where he's also a distinguished professor.  David used to believe what is currently the mainstream view, that our memories are stored in our synapses, those connections between our neurons.  So as we learn, the synaptic connections strengthen and weaken until they're just right, and that serves to preserve the memory. That's been the dominant view in neuroscience for decades, and is the fundamental principle that underlies basically all of deep learning in AI. But because of his own and others' experiments, which he describes in this episode, David has come to the conclusion that memory must be stored not at the synapse, but in the nucleus of neurons, likely by some epigenetic mechanism mediated by RNA molecules. If this sounds familiar, I had Randy Gallistel on the podcast on episode 126 to discuss similar ideas, and David discusses where he and Randy differ in their thoughts. This episode starts out pretty technical as David describes the series of experiments that changed his mind, but after that we broaden our discussion to a lot of the surrounding issues regarding whether his story about memory is true. And we discuss meta-issues like how old discarded ideas in science often find their way back, what it's like studying a non-mainstream topic, including challenges trying to get funded for it, and so on.</p>







<ul>
<li><a href="https://www.ibp.ucla.edu/faculty/david-glanzman/">David's Faculty Page</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.sciencedirect.com/science/article/abs/pii/S0006291X21007518">The central importance of nuclear mechanisms in the storage of memory</a>.</li>



<li>David mentions Arc and virus-like transmission:
<ul>
<li><a href="https://www.cell.com/cell/fulltext/S0092-8674(17)31504-0">The Neuronal Gene Arc Encodes a Repurposed Retrotransposon Gag Protein that Mediates Intercellular RNA Transfer</a>.</li>



<li><a href="https://pubmed.ncbi.nlm.nih.gov/31953526/">Structure of an Arc-ane virus-like capsid</a>.</li>
</ul>
</li>
</ul>
</li>



<li>David mentions many of the ideas from the <a href="https://2023symposium.ibp.ucla.edu/">Pushing the Boundaries: Neuroscience, Cognition, and Life</a>  Symposium.</li>



<li>Related episodes:
<ul>
<li><a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a></li>



<li><a href="https://braininspired.co/podcast/127/">BI 127 Tomás Ryan: Memory, Instinct, and Forgetting</a></li>
</ul>
</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/f88fec1d-c084-47cd-bbe6-082336b34e8b-172-David-Glanzman-Public.mp3" length="88463504"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.











David runs his lab at UCLA where he's also a distinguished professor.  David used to believe what is currently the mainstream view, that our memories are stored in our synapses, those connections between our neurons.  So as we learn, the synaptic connections strengthen and weaken until they're just right, and that serves to preserve the memory. That's been the dominant view in neuroscience for decades, and is the fundamental principle that underlies basically all of deep learning in AI. But because of his own and others' experiments, which he describes in this episode, David has come to the conclusion that memory must be stored not at the synapse, but in the nucleus of neurons, likely by some epigenetic mechanism mediated by RNA molecules. If this sounds familiar, I had Randy Gallistel on the podcast on episode 126 to discuss similar ideas, and David discusses where he and Randy differ in their thoughts. This episode starts out pretty technical as David describes the series of experiments that changed his mind, but after that we broaden our discussion to a lot of the surrounding issues regarding whether his story about memory is true. And we discuss meta-issues like how old discarded ideas in science often find their way back, what it's like studying a non-mainstream topic, including challenges trying to get funded for it, and so on.








David's Faculty Page.



Related papers

The central importance of nuclear mechanisms in the storage of memory.



David mentions Arc and virus-like transmission:

The Neuronal Gene Arc Encodes a Repurposed Retrotransposon Gag Protein that Mediates Intercellular RNA Transfer.



Structure of an Arc-ane virus-like capsid.







David mentions many of the ideas from the Pushing the Boundaries: Neuroscience, Cognition, and Life  Symposium.



Related episodes:

BI 126 Randy Gallistel: Where Is the Engram?



BI 127 Tomás Ryan: Memory, Instinct, and Forgetting


]]>
                </itunes:summary>
                                                                            <itunes:duration>01:30:58</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 171 Mike Frank: Early Language and Cognition]]>
                </title>
                <pubDate>Sat, 22 Jul 2023 00:17:23 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1521357</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-171-mike-frank-early-language-and-cognition</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>My guest is Michael C. Frank, better known as Mike Frank, who runs the Language and Cognition lab at Stanford. Mike's main interests center on how children learn language - in particular he focuses a lot on early word learning, and what that tells us about our other cognitive functions, like concept formation and social cognition.</p>



<p>We discuss that, his love for developing open data sets that anyone can use,</p>



<p>The dance he dances between bottom-up data-driven approaches in this big data era, traditional experimental approaches, and top-down theory-driven approaches</p>



<p>How early language learning in children differs from LLM learning</p>



<p>Mike's rational speech act model of language use, which considers the intentions or pragmatics of speakers and listeners in dialogue.</p>



<ul>
<li><a href="https://langcog.stanford.edu/">Language &amp; Cognition Lab</a></li>



<li>Twitter: <a href="https://twitter.com/mcxfrank">@mcxfrank</a>.
<ul>
<li>I mentioned Mike's <a href="https://twitter.com/mcxfrank/status/1643296168276033538">tweet thread</a> about saying LLMs "have" cognitive functions:</li>
</ul>
</li>



<li>Related papers:
<ul>
<li><a href="http://langcog.stanford.edu/papers_new/goodman-2016-tics.pdf">Pragmatic language interpretation as probabilistic inference.</a></li>



<li><a href="https://psyarxiv.com/yhrb4">Toward a “Standard Model” of Early Language Learning.</a></li>



<li><a href="https://psyarxiv.com/v8e56/">The pervasive role of pragmatics in early language.</a></li>



<li><a href="https://psyarxiv.com/95erq">The Structure of Developmental Variation in Early Childhood.</a></li>



<li><a href="https://arxiv.org/pdf/2006.07968.pdf">Relational reasoning and generalization using non-symbolic neural networks.</a></li>



<li><a href="https://www.pnas.org/doi/10.1073/pnas.2014196118">Unsupervised neural network models of the ventral visual stream.</a></li>
</ul>
</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











My guest is Michael C. Frank, better known as Mike Frank, who runs the Language and Cognition lab at Stanford. Mike's main interests center on how children learn language - in particular he focuses a lot on early word learning, and what that tells us about our other cognitive functions, like concept formation and social cognition.



We discuss that, his love for developing open data sets that anyone can use,



The dance he dances between bottom-up data-driven approaches in this big data era, traditional experimental approaches, and top-down theory-driven approaches



How early language learning in children differs from LLM learning



Mike's rational speech act model of language use, which considers the intentions or pragmatics of speakers and listeners in dialogue.




Language & Cognition Lab



Twitter: @mcxfrank.

I mentioned Mike's tweet thread about saying LLMs "have" cognitive functions:





Related papers:

Pragmatic language interpretation as probabilistic inference.



Toward a “Standard Model” of Early Language Learning.



The pervasive role of pragmatics in early language.



The Structure of Developmental Variation in Early Childhood.



Relational reasoning and generalization using non-symbolic neural networks.



Unsupervised neural network models of the ventral visual stream.


]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 171 Mike Frank: Early Language and Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>My guest is Michael C. Frank, better known as Mike Frank, who runs the Language and Cognition lab at Stanford. Mike's main interests center on how children learn language - in particular he focuses a lot on early word learning, and what that tells us about our other cognitive functions, like concept formation and social cognition.</p>



<p>We discuss that, his love for developing open data sets that anyone can use,</p>



<p>The dance he dances between bottom-up data-driven approaches in this big data era, traditional experimental approaches, and top-down theory-driven approaches</p>



<p>How early language learning in children differs from LLM learning</p>



<p>Mike's rational speech act model of language use, which considers the intentions or pragmatics of speakers and listeners in dialogue.</p>



<ul>
<li><a href="https://langcog.stanford.edu/">Language &amp; Cognition Lab</a></li>



<li>Twitter: <a href="https://twitter.com/mcxfrank">@mcxfrank</a>.
<ul>
<li>I mentioned Mike's <a href="https://twitter.com/mcxfrank/status/1643296168276033538">tweet thread</a> about saying LLMs "have" cognitive functions:</li>
</ul>
</li>



<li>Related papers:
<ul>
<li><a href="http://langcog.stanford.edu/papers_new/goodman-2016-tics.pdf">Pragmatic language interpretation as probabilistic inference.</a></li>



<li><a href="https://psyarxiv.com/yhrb4">Toward a “Standard Model” of Early Language Learning.</a></li>



<li><a href="https://psyarxiv.com/v8e56/">The pervasive role of pragmatics in early language.</a></li>



<li><a href="https://psyarxiv.com/95erq">The Structure of Developmental Variation in Early Childhood.</a></li>



<li><a href="https://arxiv.org/pdf/2006.07968.pdf">Relational reasoning and generalization using non-symbolic neural networks.</a></li>



<li><a href="https://www.pnas.org/doi/10.1073/pnas.2014196118">Unsupervised neural network models of the ventral visual stream.</a></li>
</ul>
</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2f8a3181-fc33-4451-8c65-bc55517744c4-171-Mike-Frank-public.mp3" length="82274440"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











My guest is Michael C. Frank, better known as Mike Frank, who runs the Language and Cognition lab at Stanford. Mike's main interests center on how children learn language - in particular he focuses a lot on early word learning, and what that tells us about our other cognitive functions, like concept formation and social cognition.



We discuss that, his love for developing open data sets that anyone can use,



The dance he dances between bottom-up data-driven approaches in this big data era, traditional experimental approaches, and top-down theory-driven approaches



How early language learning in children differs from LLM learning



Mike's rational speech act model of language use, which considers the intentions or pragmatics of speakers and listeners in dialogue.




Language & Cognition Lab



Twitter: @mcxfrank.

I mentioned Mike's tweet thread about saying LLMs "have" cognitive functions:





Related papers:

Pragmatic language interpretation as probabilistic inference.



Toward a “Standard Model” of Early Language Learning.



The pervasive role of pragmatics in early language.



The Structure of Developmental Variation in Early Childhood.



Relational reasoning and generalization using non-symbolic neural networks.



Unsupervised neural network models of the ventral visual stream.


]]>
                </itunes:summary>
                                                                            <itunes:duration>01:24:40</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 170 Ali Mohebi: Starting a Research Lab]]>
                </title>
                <pubDate>Tue, 11 Jul 2023 18:11:18 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1514904</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-170-ali-mohebi-starting-a-research-lab</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>





<p>In this episode I have a casual chat with Ali Mohebi about his new faculty position and his plans for the future.</p>



<ul>
<li><a href="https://mohebial.com/">Ali's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/mohebial">@mohebial</a></li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









In this episode I have a casual chat with Ali Mohebi about his new faculty position and his plans for the future.




Ali's website.



Twitter: @mohebial
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 170 Ali Mohebi: Starting a Research Lab]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>





<p>In this episode I have a casual chat with Ali Mohebi about his new faculty position and his plans for the future.</p>



<ul>
<li><a href="https://mohebial.com/">Ali's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/mohebial">@mohebial</a></li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/3ddf5d3d-ba06-4ef4-a106-c0e1d4744770-170-Ali-Mohebi.mp3" length="75161365"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









In this episode I have a casual chat with Ali Mohebi about his new faculty position and his plans for the future.




Ali's website.



Twitter: @mohebial
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:17:15</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 169 Andrea Martin: Neural Dynamics and Language]]>
                </title>
                <pubDate>Wed, 28 Jun 2023 18:00:16 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1507628</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-169-andrea-martin-neural-dynamics-and-language</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>





<p>My guest today is Andrea Martin, who is the Research Group Leader in the department of Language and Computation in Neural Systems at the Max Planck Institute and the Donders Institute. Andrea is deeply interested in understanding how our biological brains process and represent language. To this end, she is developing a theoretical model of language. The aim of the model is to account for the properties of language, like its structure, its compositionality, its infinite expressibility, while adhering to physiological data we <em>can </em>measure from human brains.</p>



<p>Her theoretical model of language, among other things, brings in the idea of low-dimensional manifolds and neural dynamics along those manifolds. We've discussed manifolds a lot on the podcast, but they are a kind of abstract structure in the space of possible neural population activity - the neural dynamics. And that manifold structure defines the range of possible trajectories, or pathways, the neural dynamics can take over  time.</p>



<p>One of Andrea's ideas is that manifolds might be a way for the brain to combine two properties of how we learn and use language. One of those properties is the statistical regularities found in language - a given word, for example, occurs more often near some words and less often near some other words. This statistical approach is the foundation of how large language models are trained. The other property is the more formal structure of language: how it's arranged and organized in such a way that gives it meaning to us. Perhaps these two properties of language can come together as a single trajectory along a neural manifold. But she has lots of ideas, and we discuss many of them. And of course we discuss large language models, and how Andrea thinks of them with respect to biological cognition. We talk about modeling in general and what models do and don't tell us, and much more.</p>



<ul>
<li><a href="https://sites.google.com/site/aemn1011/home">Andrea's website.</a></li>



<li>Twitter: <a href="https://twitter.com/andrea_e_martin">@andrea_e_martin</a>.</li>



<li>Related papers
<ul>
<li><a href="https://pure.mpg.de/rest/items/item_3196366_5/component/file_3240713/content">A Compositional Neural Architecture for Language</a></li>



<li><a href="https://pure.mpg.de/rest/items/item_3335841_1/component/file_3335842/content">An oscillating computational model can track pseudo-rhythmic speech by using linguistic predictions</a></li>



<li><a href="https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3001713">Neural dynamics differentially encode phrases and sentences during spoken language comprehension</a></li>



<li><a href="https://psyarxiv.com/x59un/">Hierarchical structure in language and action: A formal comparison</a></li>
</ul>
</li>



<li>Andrea mentions this book: <a href="https://amzn.to/3NDa5Cd">The Geometry of Biological Time</a>.</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









My guest today is Andrea Martin, who is the Research Group Leader in the department of Language and Computation in Neural Systems at the Max Planck Institute and the Donders Institute. Andrea is deeply interested in understanding how our biological brains process and represent language. To this end, she is developing a theoretical model of language. The aim of the model is to account for the properties of language, like its structure, its compositionality, its infinite expressibility, while adhering to physiological data we can measure from human brains.



Her theoretical model of language, among other things, brings in the idea of low-dimensional manifolds and neural dynamics along those manifolds. We've discussed manifolds a lot on the podcast, but they are a kind of abstract structure in the space of possible neural population activity - the neural dynamics. And that manifold structure defines the range of possible trajectories, or pathways, the neural dynamics can take over  time.



One of Andrea's ideas is that manifolds might be a way for the brain to combine two properties of how we learn and use language. One of those properties is the statistical regularities found in language - a given word, for example, occurs more often near some words and less often near some other words. This statistical approach is the foundation of how large language models are trained. The other property is the more formal structure of language: how it's arranged and organized in such a way that gives it meaning to us. Perhaps these two properties of language can come together as a single trajectory along a neural manifold. But she has lots of ideas, and we discuss many of them. And of course we discuss large language models, and how Andrea thinks of them with respect to biological cognition. We talk about modeling in general and what models do and don't tell us, and much more.




Andrea's website.



Twitter: @andrea_e_martin.



Related papers

A Compositional Neural Architecture for Language



An oscillating computational model can track pseudo-rhythmic speech by using linguistic predictions



Neural dynamics differentially encode phrases and sentences during spoken language comprehension



Hierarchical structure in language and action: A formal comparison





Andrea mentions this book: The Geometry of Biological Time.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 169 Andrea Martin: Neural Dynamics and Language]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>





<p>My guest today is Andrea Martin, who is the Research Group Leader in the department of Language and Computation in Neural Systems at the Max Planck Institute and the Donders Institute. Andrea is deeply interested in understanding how our biological brains process and represent language. To this end, she is developing a theoretical model of language. The aim of the model is to account for the properties of language, like its structure, its compositionality, its infinite expressibility, while adhering to physiological data we <em>can </em>measure from human brains.</p>



<p>Her theoretical model of language, among other things, brings in the idea of low-dimensional manifolds and neural dynamics along those manifolds. We've discussed manifolds a lot on the podcast, but they are a kind of abstract structure in the space of possible neural population activity - the neural dynamics. And that manifold structure defines the range of possible trajectories, or pathways, the neural dynamics can take over time.</p>



<p>One of Andrea's ideas is that manifolds might be a way for the brain to combine two properties of how we learn and use language. One of those properties is the statistical regularities found in language - a given word, for example, occurs more often near some words and less often near some other words. This statistical approach is the foundation of how large language models are trained. The other property is the more formal structure of language: how it's arranged and organized in such a way that gives it meaning to us. Perhaps these two properties of language can come together as a single trajectory along a neural manifold. But she has lots of ideas, and we discuss many of them. And of course we discuss large language models, and how Andrea thinks of them with respect to biological cognition. We talk about modeling in general and what models do and don't tell us, and much more.</p>



<ul>
<li><a href="https://sites.google.com/site/aemn1011/home">Andrea's website.</a></li>



<li>Twitter: <a href="https://twitter.com/andrea_e_martin">@andrea_e_martin</a>.</li>



<li>Related papers
<ul>
<li><a href="https://pure.mpg.de/rest/items/item_3196366_5/component/file_3240713/content">A Compositional Neural Architecture for Language</a></li>



<li><a href="https://pure.mpg.de/rest/items/item_3335841_1/component/file_3335842/content">An oscillating computational model can track pseudo-rhythmic speech by using linguistic predictions</a></li>



<li><a href="https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3001713">Neural dynamics differentially encode phrases and sentences during spoken language comprehension</a></li>



<li><a href="https://psyarxiv.com/x59un/">Hierarchical structure in language and action: A formal comparison</a></li>
</ul>
</li>



<li>Andrea mentions this book: <a href="https://amzn.to/3NDa5Cd">The Geometry of Biological Time</a>.</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/b687ec1b-1d97-4317-a50f-743f73547b52-169-Andrea-Martin-PublicPM.mp3" length="98470438"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









My guest today is Andrea Martin, who is the Research Group Leader in the department of Language and Computation in Neural Systems at the Max Planck Institute and the Donders Institute. Andrea is deeply interested in understanding how our biological brains process and represent language. To this end, she is developing a theoretical model of language. The aim of the model is to account for the properties of language, like its structure, its compositionality, its infinite expressibility, while adhering to physiological data we can measure from human brains.



Her theoretical model of language, among other things, brings in the idea of low-dimensional manifolds and neural dynamics along those manifolds. We've discussed manifolds a lot on the podcast, but they are a kind of abstract structure in the space of possible neural population activity - the neural dynamics. And that manifold structure defines the range of possible trajectories, or pathways, the neural dynamics can take over time.



One of Andrea's ideas is that manifolds might be a way for the brain to combine two properties of how we learn and use language. One of those properties is the statistical regularities found in language - a given word, for example, occurs more often near some words and less often near some other words. This statistical approach is the foundation of how large language models are trained. The other property is the more formal structure of language: how it's arranged and organized in such a way that gives it meaning to us. Perhaps these two properties of language can come together as a single trajectory along a neural manifold. But she has lots of ideas, and we discuss many of them. And of course we discuss large language models, and how Andrea thinks of them with respect to biological cognition. We talk about modeling in general and what models do and don't tell us, and much more.




Andrea's website.



Twitter: @andrea_e_martin.



Related papers

A Compositional Neural Architecture for Language



An oscillating computational model can track pseudo-rhythmic speech by using linguistic predictions



Neural dynamics differentially encode phrases and sentences during spoken language comprehension



Hierarchical structure in language and action: A formal comparison





Andrea mentions this book: The Geometry of Biological Time.
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:41:30</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 168 Frauke Sandig and Eric Black w Alex Gomez-Marin: AWARE: Glimpses of Consciousness]]>
                </title>
                <pubDate>Fri, 02 Jun 2023 15:42:22 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1489677</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-168-frauke-sandig-and-eric-black-w-alex-gomez-marin-aware-glimpses-of-consciousness</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>This is one in a periodic series of episodes with <a href="https://braininspired.co/podcast/136/">Alex Gomez-Marin</a>, exploring how the arts and humanities can impact (neuro)science. Artistic creations, like cinema, have the ability to momentarily lower our ever-critical scientific mindset and allow us to imagine alternate possibilities and experience emotions outside our normal scientific routines. Might this feature of art potentially change our scientific attitudes and perspectives?</p>



<p>Frauke Sandig and Eric Black recently made the documentary film <a href="https://aware-film.com/">AWARE: Glimpses of Consciousness</a>, which profiles six researchers studying consciousness from different perspectives. The film is filled with rich visual imagery and conveys a sense of wonder and awe in trying to understand subjective experience, while diving deep into the reflections of the scientists and thinkers approaching the topic from their various perspectives.</p>



<p>This isn't a "normal" Brain Inspired episode, but I hope you enjoy the discussion!</p>



<ul>
<li><a href="https://aware-film.com/">AWARE: Glimpses of Consciousness</a></li>



<li><a href="https://umbrellafilms.org/">Umbrella Films</a></li>
</ul>



<p>0:00 - Intro
19:42 - Mechanistic reductionism
45:33 - Changing views during lifetime
53:49 - Did making the film alter your views?
57:49 - ChatGPT
1:04:20 - Materialist assumption
1:11:00 - Science of consciousness
1:20:49 - Transhumanism
1:32:01 - Integrity
1:36:19 - Aesthetics
1:39:50 - Response to the film</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.











This is one in a periodic series of episodes with Alex Gomez-Marin, exploring how the arts and humanities can impact (neuro)science. Artistic creations, like cinema, have the ability to momentarily lower our ever-critical scientific mindset and allow us to imagine alternate possibilities and experience emotions outside our normal scientific routines. Might this feature of art potentially change our scientific attitudes and perspectives?



Frauke Sandig and Eric Black recently made the documentary film AWARE: Glimpses of Consciousness, which profiles six researchers studying consciousness from different perspectives. The film is filled with rich visual imagery and conveys a sense of wonder and awe in trying to understand subjective experience, while diving deep into the reflections of the scientists and thinkers approaching the topic from their various perspectives.



This isn't a "normal" Brain Inspired episode, but I hope you enjoy the discussion!




AWARE: Glimpses of Consciousness



Umbrella Films




0:00 - Intro
19:42 - Mechanistic reductionism
45:33 - Changing views during lifetime
53:49 - Did making the film alter your views?
57:49 - ChatGPT
1:04:20 - Materialist assumption
1:11:00 - Science of consciousness
1:20:49 - Transhumanism
1:32:01 - Integrity
1:36:19 - Aesthetics
1:39:50 - Response to the film]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 168 Frauke Sandig and Eric Black w Alex Gomez-Marin: AWARE: Glimpses of Consciousness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>This is one in a periodic series of episodes with <a href="https://braininspired.co/podcast/136/">Alex Gomez-Marin</a>, exploring how the arts and humanities can impact (neuro)science. Artistic creations, like cinema, have the ability to momentarily lower our ever-critical scientific mindset and allow us to imagine alternate possibilities and experience emotions outside our normal scientific routines. Might this feature of art potentially change our scientific attitudes and perspectives?</p>



<p>Frauke Sandig and Eric Black recently made the documentary film <a href="https://aware-film.com/">AWARE: Glimpses of Consciousness</a>, which profiles six researchers studying consciousness from different perspectives. The film is filled with rich visual imagery and conveys a sense of wonder and awe in trying to understand subjective experience, while diving deep into the reflections of the scientists and thinkers approaching the topic from their various perspectives.</p>



<p>This isn't a "normal" Brain Inspired episode, but I hope you enjoy the discussion!</p>



<ul>
<li><a href="https://aware-film.com/">AWARE: Glimpses of Consciousness</a></li>



<li><a href="https://umbrellafilms.org/">Umbrella Films</a></li>
</ul>



<p>0:00 - Intro
19:42 - Mechanistic reductionism
45:33 - Changing views during lifetime
53:49 - Did making the film alter your views?
57:49 - ChatGPT
1:04:20 - Materialist assumption
1:11:00 - Science of consciousness
1:20:49 - Transhumanism
1:32:01 - Integrity
1:36:19 - Aesthetics
1:39:50 - Response to the film</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/f05c0838-8393-4665-9582-06638b403674-Special-w-AGM-Frauke-Sandig-and-Erik-Black-.mp3" length="111911327"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.











This is one in a periodic series of episodes with Alex Gomez-Marin, exploring how the arts and humanities can impact (neuro)science. Artistic creations, like cinema, have the ability to momentarily lower our ever-critical scientific mindset and allow us to imagine alternate possibilities and experience emotions outside our normal scientific routines. Might this feature of art potentially change our scientific attitudes and perspectives?



Frauke Sandig and Eric Black recently made the documentary film AWARE: Glimpses of Consciousness, which profiles six researchers studying consciousness from different perspectives. The film is filled with rich visual imagery and conveys a sense of wonder and awe in trying to understand subjective experience, while diving deep into the reflections of the scientists and thinkers approaching the topic from their various perspectives.



This isn't a "normal" Brain Inspired episode, but I hope you enjoy the discussion!




AWARE: Glimpses of Consciousness



Umbrella Films




0:00 - Intro
19:42 - Mechanistic reductionism
45:33 - Changing views during lifetime
53:49 - Did making the film alter your views?
57:49 - ChatGPT
1:04:20 - Materialist assumption
1:11:00 - Science of consciousness
1:20:49 - Transhumanism
1:32:01 - Integrity
1:36:19 - Aesthetics
1:39:50 - Response to the film]]>
                </itunes:summary>
                                                                            <itunes:duration>01:54:42</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 167 Panayiota Poirazi: AI Brains Need Dendrites]]>
                </title>
                <pubDate>Sat, 27 May 2023 15:22:17 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1486288</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-167-panayiota-poirazi-ai-brains-need-dendrites</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Panayiota Poirazi runs the Poirazi Lab at the FORTH Institute of Molecular Biology and Biotechnology, and Yiota loves dendrites, those branching tree-like structures sticking out of all your neurons, and she thinks you should love dendrites, too, whether you study biological or artificial intelligence. In neuroscience, the old story was that dendrites just reach out and collect incoming signals for the all-important neuron cell body to process. Yiota, and people like Matthew Larkum, <a href="https://braininspired.co/podcast/138/">with whom I chatted in episode 138</a>, are continuing to demonstrate that dendrites are themselves computationally complex and powerful, doing many varieties of important signal transformation before signals reach the cell body. For example, in 2003, Yiota showed that because of dendrites, a single neuron can act as a two-layer artificial neural network, and since then others have shown single neurons can act as deeper and deeper multi-layer networks. In Yiota's opinion, an even more important function of dendrites is increased computing efficiency, something evolution favors and something artificial networks need to favor as well moving forward.</p>



<ul>
<li><a href="https://dendrites.gr/">Poirazi Lab</a>

</li>



<li>Twitter: <a href="https://twitter.com/yiotapoirazi">@YiotaPoirazi</a>.</li>



<li>Related papers
<ul>
<li><a href="https://zenodo.org/record/4955397">Drawing Inspiration from Biological Dendrites to Empower Artificial Neural Networks</a>.</li>



<li><a href="https://doi.org/10.1038/s41583-020-0301-7">Illuminating dendritic function with computational models</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-022-35747-8.pdf">Introducing the Dendrify framework for incorporating dendrites to spiking neural networks</a>.</li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(03)00149-1">Pyramidal Neuron as Two-Layer Neural Network</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:04 - Yiota's background
6:40 - Artificial networks and dendrites
9:24 - Dendrites special sauce?
14:50 - Where are we in understanding dendrite function?
20:29 - Algorithms, plasticity, and brains
29:00 - Functional unit of the brain
42:43 - Engrams
51:03 - Dendrites and nonlinearity
54:51 - Spiking neural networks
56:02 - Best level of biological detail
57:52 - Dendrify
1:05:41 - Experimental work
1:10:58 - Dendrites across species and development
1:16:50 - Career reflection
1:17:57 - Evolution of Yiota's thinking</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Panayiota Poirazi runs the Poirazi Lab at the FORTH Institute of Molecular Biology and Biotechnology, and Yiota loves dendrites, those branching tree-like structures sticking out of all your neurons, and she thinks you should love dendrites, too, whether you study biological or artificial intelligence. In neuroscience, the old story was that dendrites just reach out and collect incoming signals for the all-important neuron cell body to process. Yiota, and people like Matthew Larkum, with whom I chatted in episode 138, are continuing to demonstrate that dendrites are themselves computationally complex and powerful, doing many varieties of important signal transformation before signals reach the cell body. For example, in 2003, Yiota showed that because of dendrites, a single neuron can act as a two-layer artificial neural network, and since then others have shown single neurons can act as deeper and deeper multi-layer networks. In Yiota's opinion, an even more important function of dendrites is increased computing efficiency, something evolution favors and something artificial networks need to favor as well moving forward.




Poirazi Lab





Twitter: @YiotaPoirazi.



Related papers

Drawing Inspiration from Biological Dendrites to Empower Artificial Neural Networks.



Illuminating dendritic function with computational models.



Introducing the Dendrify framework for incorporating dendrites to spiking neural networks.



Pyramidal Neuron as Two-Layer Neural Network






0:00 - Intro
3:04 - Yiota's background
6:40 - Artificial networks and dendrites
9:24 - Dendrites special sauce?
14:50 - Where are we in understanding dendrite function?
20:29 - Algorithms, plasticity, and brains
29:00 - Functional unit of the brain
42:43 - Engrams
51:03 - Dendrites and nonlinearity
54:51 - Spiking neural networks
56:02 - Best level of biological detail
57:52 - Dendrify
1:05:41 - Experimental work
1:10:58 - Dendrites across species and development
1:16:50 - Career reflection
1:17:57 - Evolution of Yiota's thinking]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 167 Panayiota Poirazi: AI Brains Need Dendrites]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Panayiota Poirazi runs the Poirazi Lab at the FORTH Institute of Molecular Biology and Biotechnology, and Yiota loves dendrites, those branching tree-like structures sticking out of all your neurons, and she thinks you should love dendrites, too, whether you study biological or artificial intelligence. In neuroscience, the old story was that dendrites just reach out and collect incoming signals for the all-important neuron cell body to process. Yiota, and people like Matthew Larkum, <a href="https://braininspired.co/podcast/138/">with whom I chatted in episode 138</a>, are continuing to demonstrate that dendrites are themselves computationally complex and powerful, doing many varieties of important signal transformation before signals reach the cell body. For example, in 2003, Yiota showed that because of dendrites, a single neuron can act as a two-layer artificial neural network, and since then others have shown single neurons can act as deeper and deeper multi-layer networks. In Yiota's opinion, an even more important function of dendrites is increased computing efficiency, something evolution favors and something artificial networks need to favor as well moving forward.</p>



<ul>
<li><a href="https://dendrites.gr/">Poirazi Lab</a>

</li>



<li>Twitter: <a href="https://twitter.com/yiotapoirazi">@YiotaPoirazi</a>.</li>



<li>Related papers
<ul>
<li><a href="https://zenodo.org/record/4955397">Drawing Inspiration from Biological Dendrites to Empower Artificial Neural Networks</a>.</li>



<li><a href="https://doi.org/10.1038/s41583-020-0301-7">Illuminating dendritic function with computational models</a>.</li>



<li><a href="https://www.nature.com/articles/s41467-022-35747-8.pdf">Introducing the Dendrify framework for incorporating dendrites to spiking neural networks</a>.</li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(03)00149-1">Pyramidal Neuron as Two-Layer Neural Network</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:04 - Yiota's background
6:40 - Artificial networks and dendrites
9:24 - Dendrites special sauce?
14:50 - Where are we in understanding dendrite function?
20:29 - Algorithms, plasticity, and brains
29:00 - Functional unit of the brain
42:43 - Engrams
51:03 - Dendrites and nonlinearity
54:51 - Spiking neural networks
56:02 - Best level of biological detail
57:52 - Dendrify
1:05:41 - Experimental work
1:10:58 - Dendrites across species and development
1:16:50 - Career reflection
1:17:57 - Evolution of Yiota's thinking</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2e741eb2-19ed-4f12-90f0-0146ede47983-167-Panyiota-Poirazi-Public.mp3" length="86377995"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Panayiota Poirazi runs the Poirazi Lab at the FORTH Institute of Molecular Biology and Biotechnology, and Yiota loves dendrites, those branching tree-like structures sticking out of all your neurons, and she thinks you should love dendrites, too, whether you study biological or artificial intelligence. In neuroscience, the old story was that dendrites just reach out and collect incoming signals for the all-important neuron cell body to process. Yiota, and people like Matthew Larkum, with whom I chatted in episode 138, are continuing to demonstrate that dendrites are themselves computationally complex and powerful, doing many varieties of important signal transformation before signals reach the cell body. For example, in 2003, Yiota showed that because of dendrites, a single neuron can act as a two-layer artificial neural network, and since then others have shown single neurons can act as deeper and deeper multi-layer networks. In Yiota's opinion, an even more important function of dendrites is increased computing efficiency, something evolution favors and something artificial networks need to favor as well moving forward.




Poirazi Lab





Twitter: @YiotaPoirazi.



Related papers

Drawing Inspiration from Biological Dendrites to Empower Artificial Neural Networks.



Illuminating dendritic function with computational models.



Introducing the Dendrify framework for incorporating dendrites to spiking neural networks.



Pyramidal Neuron as Two-Layer Neural Network






0:00 - Intro
3:04 - Yiota's background
6:40 - Artificial networks and dendrites
9:24 - Dendrites special sauce?
14:50 - Where are we in understanding dendrite function?
20:29 - Algorithms, plasticity, and brains
29:00 - Functional unit of the brain
42:43 - Engrams
51:03 - Dendrites and nonlinearity
54:51 - Spiking neural networks
56:02 - Best level of biological detail
57:52 - Dendrify
1:05:41 - Experimental work
1:10:58 - Dendrites across species and development
1:16:50 - Career reflection
1:17:57 - Evolution of Yiota's thinking]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:43</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 166 Nick Enfield: Language vs. Reality]]>
                </title>
                <pubDate>Tue, 09 May 2023 18:00:02 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1474756</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-166-nick-enfield-language-vs-reality</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Nick Enfield is a professor of linguistics at the University of Sydney. In this episode we discuss topics in his most recent book, <a href="https://amzn.to/40NK3Qm">Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists</a>. A central question in the book is what is language for? What's the function of language? You might be familiar with the debate about whether language evolved for each of us thinking our wonderful human thoughts, or for communicating those thoughts between each other. Nick would be on the communication side of that debate, but if by communication we mean simply the transmission of thoughts or information between people - I have a thought, I send it to you in language, and that thought is now in your head - then Nick wouldn't take either side of that debate. He argues the function of language goes beyond the transmission of information, and instead is primarily an evolved solution for social coordination - coordinating our behaviors and attention. When we use language, we're creating maps in our heads so we can agree on where to go.</p>





<p>For example, when I say, "This is brain inspired," I'm pointing you to a place to meet me on a conceptual map, saying, "Get ready, we're about to have a great time again!"  In any case, with those 4 words, "This is brain inspired," I'm not just transmitting information from my head into your head. I'm providing you with a landmark so you can focus your attention appropriately.</p>



<p>From that premise, that language is about social coordination, we talk about a handful of topics in his book, like the relationship <em>between</em> language and reality, the idea that all language is framing - that is, how we say something influences how to think about it. We discuss how our language changes in different social situations, the role of stories, and of course, how LLMs fit into Nick's story about language.</p>



<ul>
<li><a href="https://nickenfield.org/">Nick's website</a></li>



<li>Twitter: <a href="https://twitter.com/njenfield">@njenfield</a></li>



<li>Book:
<ul>
<li><a href="https://amzn.to/40NK3Qm">Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists</a>.</li>
</ul>
</li>



<li>Papers:
<ul>
<li><a href="https://royalsocietypublishing.org/doi/10.1098/rstb.2021.0352">Linguistic concepts are self-generating choice architectures</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:23 - Is learning about language important?
15:43 - Linguistic Anthropology
28:56 - Language and truth
33:57 - How special is language
46:19 - Choice architecture and framing
48:19 - Language for thinking or communication
52:30 - Agency and language
56:51 - Large language models
1:16:18 - Getting language right
1:20:48 - Social relationships and language</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Nick Enfield is a professor of linguistics at the University of Sydney. In this episode we discuss topics in his most recent book, Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists. A central question in the book is what is language for? What's the function of language? You might be familiar with the debate about whether language evolved for each of us thinking our wonderful human thoughts, or for communicating those thoughts between each other. Nick would be on the communication side of that debate, but if by communication we mean simply the transmission of thoughts or information between people - I have a thought, I send it to you in language, and that thought is now in your head - then Nick wouldn't take either side of that debate. He argues the function of language goes beyond the transmission of information, and instead is primarily an evolved solution for social coordination - coordinating our behaviors and attention. When we use language, we're creating maps in our heads so we can agree on where to go.





For example, when I say, "This is brain inspired," I'm pointing you to a place to meet me on a conceptual map, saying, "Get ready, we're about to have a great time again!"  In any case, with those 4 words, "This is brain inspired," I'm not just transmitting information from my head into your head. I'm providing you with a landmark so you can focus your attention appropriately.



From that premise, that language is about social coordination, we talk about a handful of topics in his book, like the relationship between language and reality, the idea that all language is framing - that is, how we say something influences how to think about it. We discuss how our language changes in different social situations, the role of stories, and of course, how LLMs fit into Nick's story about language.




Nick's website



Twitter: @njenfield



Book:

Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists.





Papers:

Linguistic concepts are self-generating choice architectures






0:00 - Intro
4:23 - Is learning about language important?
15:43 - Linguistic Anthropology
28:56 - Language and truth
33:57 - How special is language
46:19 - Choice architecture and framing
48:19 - Language for thinking or communication
52:30 - Agency and language
56:51 - Large language models
1:16:18 - Getting language right
1:20:48 - Social relationships and language]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 166 Nick Enfield: Language vs. Reality]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Nick Enfield is a professor of linguistics at the University of Sydney. In this episode we discuss topics in his most recent book, <a href="https://amzn.to/40NK3Qm">Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists</a>. A central question in the book is what is language for? What's the function of language? You might be familiar with the debate about whether language evolved for each of us thinking our wonderful human thoughts, or for communicating those thoughts between each other. Nick would be on the communication side of that debate, but if by communication we mean simply the transmission of thoughts or information between people - I have a thought, I send it to you in language, and that thought is now in your head - then Nick wouldn't take either side of that debate. He argues the function of language goes beyond the transmission of information, and instead is primarily an evolved solution for social coordination - coordinating our behaviors and attention. When we use language, we're creating maps in our heads so we can agree on where to go.</p>





<p>For example, when I say, "This is brain inspired," I'm pointing you to a place to meet me on a conceptual map, saying, "Get ready, we're about to have a great time again!"  In any case, with those 4 words, "This is brain inspired," I'm not just transmitting information from my head into your head. I'm providing you with a landmark so you can focus your attention appropriately.</p>



<p>From that premise, that language is about social coordination, we talk about a handful of topics in his book, like the relationship <em>between</em> language and reality, the idea that all language is framing - that is, how we say something influences how to think about it. We discuss how our language changes in different social situations, the role of stories, and of course, how LLMs fit into Nick's story about language.</p>



<ul>
<li><a href="https://nickenfield.org/">Nick's website</a></li>



<li>Twitter: <a href="https://twitter.com/njenfield">@njenfield</a></li>



<li>Book:
<ul>
<li><a href="https://amzn.to/40NK3Qm">Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists</a>.</li>
</ul>
</li>



<li>Papers:
<ul>
<li><a href="https://royalsocietypublishing.org/doi/10.1098/rstb.2021.0352">Linguistic concepts are self-generating choice architectures</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:23 - Is learning about language important?
15:43 - Linguistic Anthropology
28:56 - Language and truth
33:57 - How special is language
46:19 - Choice architecture and framing
48:19 - Language for thinking or communication
52:30 - Agency and language
56:51 - Large language models
1:16:18 - Getting language right
1:20:48 - Social relationships and language</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/8d69f987-5bca-4d77-ab25-ba3cdfe1ea64-166-Nick-Enfield-Public.mp3" length="85403105"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Nick Enfield is a professor of linguistics at the University of Sydney. In this episode we discuss topics in his most recent book, Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists. A central question in the book is what is language for? What's the function of language? You might be familiar with the debate about whether language evolved for each of us thinking our wonderful human thoughts, or for communicating those thoughts between each other. Nick would be on the communication side of that debate, but if by communication we mean simply the transmission of thoughts or information between people - I have a thought, I send it to you in language, and that thought is now in your head - then Nick wouldn't take either side of that debate. He argues the function of language goes beyond the transmission of information, and instead is primarily an evolved solution for social coordination - coordinating our behaviors and attention. When we use language, we're creating maps in our heads so we can agree on where to go.





For example, when I say, "This is brain inspired," I'm pointing you to a place to meet me on a conceptual map, saying, "Get ready, we're about to have a great time again!"  In any case, with those 4 words, "This is brain inspired," I'm not just transmitting information from my head into your head. I'm providing you with a landmark so you can focus your attention appropriately.



From that premise, that language is about social coordination, we talk about a handful of topics in his book, like the relationship between language and reality, the idea that all language is framing - that is, how we say something influences how to think about it. We discuss how our language changes in different social situations, the role of stories, and of course, how LLMs fit into Nick's story about language.




Nick's website



Twitter: @njenfield



Book:

Language vs. Reality: Why Language Is Good for Lawyers and Bad for Scientists.





Papers:

Linguistic concepts are self-generating choice architectures






0:00 - Intro
4:23 - Is learning about language important?
15:43 - Linguistic Anthropology
28:56 - Language and truth
33:57 - How special is language
46:19 - Choice architecture and framing
48:19 - Language for thinking or communication
52:30 - Agency and language
56:51 - Large language models
1:16:18 - Getting language right
1:20:48 - Social relationships and language]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 165 Jeffrey Bowers: Psychology Gets No Respect]]>
                </title>
                <pubDate>Wed, 12 Apr 2023 15:46:38 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1457327</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-165-jeffrey-bowers-psychology-gets-no-respect</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Jeffrey Bowers is a psychologist and professor at the University of Bristol. As you know, many of my previous guests are in the business of comparing brain activity to the activity of units in artificial neural network models, when humans or animals and the models are performing the same tasks. And a big story that has emerged over the past decade or so is that there's a remarkable similarity between the activities and representations in brains and models. This was originally found in object categorization tasks, where the goal is to name the object shown in a given image, where researchers have compared the activity in the models good at doing that to the activity in the parts of our brains good at doing that. It's been found in various other tasks using various other models and analyses, many of which we've discussed on previous episodes, and more recently a similar story has emerged regarding a similarity between language-related activity in our brains and the activity in large language models. Namely, the ability of our brains to predict an upcoming word can be correlated with the models' ability to predict an upcoming word. So the word is that these deep learning type models are the best models of how our brains and cognition work.</p>



<p>However, this is where Jeff Bowers comes in and raises the psychology flag, so to speak. His message is that these predictive approaches to comparing artificial and biological cognition aren't enough, and can mask important differences between them. And what we need to do is start performing more hypothesis driven tests like those performed in psychology, for example, to ask whether the models are indeed solving tasks like our brains and minds do. Jeff and his group, among others, have been doing just that and are discovering differences in models and minds that may be important if we want to use models to understand minds. We discuss some of his work and thoughts in this regard, and a lot more.</p>



<ul>
<li><a href="https://jeffbowers.blogs.bristol.ac.uk/">Website</a></li>



<li>Twitter: <a href="https://twitter.com/jeffrey_bowers">@jeffrey_bowers</a></li>



<li>Related papers:
<ul>
<li><a href="https://psyarxiv.com/5zf4s/">Deep Problems with Neural Network Models of Human Vision</a>.</li>



<li><a href="https://bpb-eu-w2.wpmucdn.com/blogs.bristol.ac.uk/dist/b/403/files/2017/11/bowers-tics-2017.pdf">Parallel Distributed Processing Theory in the Age of Deep Networks</a>.</li>



<li><a href="https://arxiv.org/pdf/2204.03740.pdf">Successes and critical failures of neural networks in capturing human-like speech recognition</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:52 - Testing neural networks
5:35 - Neuro-AI needs psychology
23:36 - Experiments in AI and neuroscience
23:51 - Why build networks like our minds?
44:55 - Vision problem spaces, solution spaces, training data
55:45 - Do we implement algorithms?
1:01:33 - Relational and combinatorial cognition
1:06:17 - Comparing representations in different networks
1:12:31 - Large language models
1:21:10 - Teaching LLMs nonsense languages</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.











Jeffrey Bowers is a psychologist and professor at the University of Bristol. As you know, many of my previous guests are in the business of comparing brain activity to the activity of units in artificial neural network models, when humans or animals and the models are performing the same tasks. And a big story that has emerged over the past decade or so is that there's a remarkable similarity between the activities and representations in brains and models. This was originally found in object categorization tasks, where the goal is to name the object shown in a given image, where researchers have compared the activity in the models good at doing that to the activity in the parts of our brains good at doing that. It's been found in various other tasks using various other models and analyses, many of which we've discussed on previous episodes, and more recently a similar story has emerged regarding a similarity between language-related activity in our brains and the activity in large language models. Namely, the ability of our brains to predict an upcoming word can be correlated with the models' ability to predict an upcoming word. So the word is that these deep learning type models are the best models of how our brains and cognition work.



However, this is where Jeff Bowers comes in and raises the psychology flag, so to speak. His message is that these predictive approaches to comparing artificial and biological cognition aren't enough, and can mask important differences between them. And what we need to do is start performing more hypothesis driven tests like those performed in psychology, for example, to ask whether the models are indeed solving tasks like our brains and minds do. Jeff and his group, among others, have been doing just that and are discovering differences in models and minds that may be important if we want to use models to understand minds. We discuss some of his work and thoughts in this regard, and a lot more.




Website



Twitter: @jeffrey_bowers



Related papers:

Deep Problems with Neural Network Models of Human Vision.



Parallel Distributed Processing Theory in the Age of Deep Networks.



Successes and critical failures of neural networks in capturing human-like speech recognition.






0:00 - Intro
3:52 - Testing neural networks
5:35 - Neuro-AI needs psychology
23:36 - Experiments in AI and neuroscience
23:51 - Why build networks like our minds?
44:55 - Vision problem spaces, solution spaces, training data
55:45 - Do we implement algorithms?
1:01:33 - Relational and combinatorial cognition
1:06:17 - Comparing representations in different networks
1:12:31 - Large language models
1:21:10 - Teaching LLMs nonsense languages]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 165 Jeffrey Bowers: Psychology Gets No Respect]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Jeffrey Bowers is a psychologist and professor at the University of Bristol. As you know, many of my previous guests are in the business of comparing brain activity to the activity of units in artificial neural network models, when humans or animals and the models are performing the same tasks. And a big story that has emerged over the past decade or so is that there's a remarkable similarity between the activities and representations in brains and models. This was originally found in object categorization tasks, where the goal is to name the object shown in a given image, where researchers have compared the activity in the models good at doing that to the activity in the parts of our brains good at doing that. It's been found in various other tasks using various other models and analyses, many of which we've discussed on previous episodes, and more recently a similar story has emerged regarding a similarity between language-related activity in our brains and the activity in large language models. Namely, the ability of our brains to predict an upcoming word can be correlated with the models' ability to predict an upcoming word. So the word is that these deep learning type models are the best models of how our brains and cognition work.</p>



<p>However, this is where Jeff Bowers comes in and raises the psychology flag, so to speak. His message is that these predictive approaches to comparing artificial and biological cognition aren't enough, and can mask important differences between them. And what we need to do is start performing more hypothesis driven tests like those performed in psychology, for example, to ask whether the models are indeed solving tasks like our brains and minds do. Jeff and his group, among others, have been doing just that and are discovering differences in models and minds that may be important if we want to use models to understand minds. We discuss some of his work and thoughts in this regard, and a lot more.</p>



<ul>
<li><a href="https://jeffbowers.blogs.bristol.ac.uk/">Website</a></li>



<li>Twitter: <a href="https://twitter.com/jeffrey_bowers">@jeffrey_bowers</a></li>



<li>Related papers:
<ul>
<li><a href="https://psyarxiv.com/5zf4s/">Deep Problems with Neural Network Models of Human Vision</a>.</li>



<li><a href="https://bpb-eu-w2.wpmucdn.com/blogs.bristol.ac.uk/dist/b/403/files/2017/11/bowers-tics-2017.pdf">Parallel Distributed Processing Theory in the Age of Deep Networks</a>.</li>



<li><a href="https://arxiv.org/pdf/2204.03740.pdf">Successes and critical failures of neural networks in capturing human-like speech recognition</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:52 - Testing neural networks
5:35 - Neuro-AI needs psychology
23:36 - Experiments in AI and neuroscience
23:51 - Why build networks like our minds?
44:55 - Vision problem spaces, solution spaces, training data
55:45 - Do we implement algorithms?
1:01:33 - Relational and combinatorial cognition
1:06:17 - Comparing representations in different networks
1:12:31 - Large language models
1:21:10 - Teaching LLMs nonsense languages</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/667144e7-a5b1-4fb2-be67-32119284fbd8-165-Jeff-Bowers-public.mp3" length="95098494"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.











Jeffrey Bowers is a psychologist and professor at the University of Bristol. As you know, many of my previous guests are in the business of comparing brain activity to the activity of units in artificial neural network models, when humans or animals and the models are performing the same tasks. And a big story that has emerged over the past decade or so is that there's a remarkable similarity between the activities and representations in brains and models. This was originally found in object categorization tasks, where the goal is to name the object shown in a given image, where researchers have compared the activity in the models good at doing that to the activity in the parts of our brains good at doing that. It's been found in various other tasks using various other models and analyses, many of which we've discussed on previous episodes, and more recently a similar story has emerged regarding a similarity between language-related activity in our brains and the activity in large language models. Namely, the ability of our brains to predict an upcoming word can be correlated with the models' ability to predict an upcoming word. So the word is that these deep learning type models are the best models of how our brains and cognition work.



However, this is where Jeff Bowers comes in and raises the psychology flag, so to speak. His message is that these predictive approaches to comparing artificial and biological cognition aren't enough, and can mask important differences between them. And what we need to do is start performing more hypothesis driven tests like those performed in psychology, for example, to ask whether the models are indeed solving tasks like our brains and minds do. Jeff and his group, among others, have been doing just that and are discovering differences in models and minds that may be important if we want to use models to understand minds. We discuss some of his work and thoughts in this regard, and a lot more.




Website



Twitter: @jeffrey_bowers



Related papers:

Deep Problems with Neural Network Models of Human Vision.



Parallel Distributed Processing Theory in the Age of Deep Networks.



Successes and critical failures of neural networks in capturing human-like speech recognition.






0:00 - Intro
3:52 - Testing neural networks
5:35 - Neuro-AI needs psychology
23:36 - Experiments in AI and neuroscience
23:51 - Why build networks like our minds?
44:55 - Vision problem spaces, solution spaces, training data
55:45 - Do we implement algorithms?
1:01:33 - Relational and combinatorial cognition
1:06:17 - Comparing representations in different networks
1:12:31 - Large language models
1:21:10 - Teaching LLMs nonsense languages]]>
                </itunes:summary>
                                                                            <itunes:duration>01:38:45</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 164 Gary Lupyan: How Language Affects Thought]]>
                </title>
                <pubDate>Sat, 01 Apr 2023 12:07:51 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1450173</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-164-gary-lupyan-how-language-affects-thought</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Gary Lupyan runs the <a href="http://sapir.psych.wisc.edu/">Lupyan Lab</a> at University of Wisconsin, Madison, where he studies how language and cognition are related. In some ways, this is a continuation of the conversation I had <a href="https://braininspired.co/podcast/163/">last episode with Ellie Pavlick</a>, in that we partly continue to discuss large language models. But Gary is more focused on how language, and naming things, categorizing things, changes our cognition related to those things. How does naming something change our perception of it, and so on. He's interested in how concepts come about, how they map onto language. So we talk about some of his work and ideas related to those topics.</p>



<p>And we actually start the discussion with some of Gary's work related to the variability of individual humans' phenomenal experience, and how that affects our individual cognition. For instance, some people are more visual thinkers, others are more verbal, and there seems to be an appreciable spectrum of differences that Gary is beginning to experimentally test.</p>



<ul>
<li><a href="http://sapir.psych.wisc.edu/">Lupyan Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/glupyan">@glupyan</a>.</li>



<li>Related papers:
<ul>
<li><a href="http://sapir.psych.wisc.edu/papers/lupyan_uchiyama_thompson_casasanto_2023.pdf">Hidden Differences in Phenomenal Experience</a>.</li>



<li><a href="http://sapir.psych.wisc.edu/papers/nedergaard_wallentin_lupyan_2022.pdf">Verbal interference paradigms: A systematic review investigating the role of language in cognition</a>.</li>
</ul>
</li>



<li>Gary mentioned <a href="">Richard Feynman's Ways of Thinking</a> video.</li>



<li>Gary and Andy Clark's Aeon article: <a href="https://aeon.co/essays/how-might-telepathy-actually-work-outside-the-realm-of-sci-fi">Super-cooperators</a>.</li>
</ul>



<p>0:00 - Intro
2:36 - Words and communication
14:10 - Phenomenal variability
26:24 - Co-operating minds
38:11 - Large language models
40:40 - Neuro-symbolic AI, scale
44:43 - How LLMs have changed Gary's thoughts about language
49:26 - Meaning, grounding, and language
54:26 - Development of language
58:53 - Symbols and emergence
1:03:20 - Language evolution in the LLM era
1:08:05 - Concepts
1:11:17 - How special is language?
1:18:08 - AGI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Gary Lupyan runs the Lupyan Lab at University of Wisconsin, Madison, where he studies how language and cognition are related. In some ways, this is a continuation of the conversation I had last episode with Ellie Pavlick, in that we partly continue to discuss large language models. But Gary is more focused on how language, and naming things, categorizing things, changes our cognition related to those things. How does naming something change our perception of it, and so on. He's interested in how concepts come about, how they map onto language. So we talk about some of his work and ideas related to those topics.



And we actually start the discussion with some of Gary's work related to the variability of individual humans' phenomenal experience, and how that affects our individual cognition. For instance, some people are more visual thinkers, others are more verbal, and there seems to be an appreciable spectrum of differences that Gary is beginning to experimentally test.




Lupyan Lab.



Twitter: @glupyan.



Related papers:

Hidden Differences in Phenomenal Experience.



Verbal interference paradigms: A systematic review investigating the role of language in cognition.





Gary mentioned Richard Feynman's Ways of Thinking video.



Gary and Andy Clark's Aeon article: Super-cooperators.




0:00 - Intro
2:36 - Words and communication
14:10 - Phenomenal variability
26:24 - Co-operating minds
38:11 - Large language models
40:40 - Neuro-symbolic AI, scale
44:43 - How LLMs have changed Gary's thoughts about language
49:26 - Meaning, grounding, and language
54:26 - Development of language
58:53 - Symbols and emergence
1:03:20 - Language evolution in the LLM era
1:08:05 - Concepts
1:11:17 - How special is language?
1:18:08 - AGI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 164 Gary Lupyan: How Language Affects Thought]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Gary Lupyan runs the <a href="http://sapir.psych.wisc.edu/">Lupyan Lab</a> at University of Wisconsin, Madison, where he studies how language and cognition are related. In some ways, this is a continuation of the conversation I had <a href="https://braininspired.co/podcast/163/">last episode with Ellie Pavlick</a>, in that we partly continue to discuss large language models. But Gary is more focused on how language, and naming things, categorizing things, changes our cognition related to those things. How does naming something change our perception of it, and so on. He's interested in how concepts come about, how they map onto language. So we talk about some of his work and ideas related to those topics.</p>



<p>And we actually start the discussion with some of Gary's work related to the variability of individual humans' phenomenal experience, and how that affects our individual cognition. For instance, some people are more visual thinkers, others are more verbal, and there seems to be an appreciable spectrum of differences that Gary is beginning to experimentally test.</p>



<ul>
<li><a href="http://sapir.psych.wisc.edu/">Lupyan Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/glupyan">@glupyan</a>.</li>



<li>Related papers:
<ul>
<li><a href="http://sapir.psych.wisc.edu/papers/lupyan_uchiyama_thompson_casasanto_2023.pdf">Hidden Differences in Phenomenal Experience</a>.</li>



<li><a href="http://sapir.psych.wisc.edu/papers/nedergaard_wallentin_lupyan_2022.pdf">Verbal interference paradigms: A systematic review investigating the role of language in cognition</a>.</li>
</ul>
</li>



<li>Gary mentioned <a href="">Richard Feynman's Ways of Thinking</a> video.</li>



<li>Gary and Andy Clark's Aeon article: <a href="https://aeon.co/essays/how-might-telepathy-actually-work-outside-the-realm-of-sci-fi">Super-cooperators</a>.</li>
</ul>



<p>0:00 - Intro
2:36 - Words and communication
14:10 - Phenomenal variability
26:24 - Co-operating minds
38:11 - Large language models
40:40 - Neuro-symbolic AI, scale
44:43 - How LLMs have changed Gary's thoughts about language
49:26 - Meaning, grounding, and language
54:26 - Development of language
58:53 - Symbols and emergence
1:03:20 - Language evolution in the LLM era
1:08:05 - Concepts
1:11:17 - How special is language?
1:18:08 - AGI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/870380e8-01fa-4181-8d03-d629e7668dd6-164-Gary-Lupyan-Public.mp3" length="88524580"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Gary Lupyan runs the Lupyan Lab at University of Wisconsin, Madison, where he studies how language and cognition are related. In some ways, this is a continuation of the conversation I had last episode with Ellie Pavlick, in that we partly continue to discuss large language models. But Gary is more focused on how language, and naming things, categorizing things, changes our cognition related to those things. How does naming something change our perception of it, and so on. He's interested in how concepts come about, how they map onto language. So we talk about some of his work and ideas related to those topics.



And we actually start the discussion with some of Gary's work related to the variability of individual humans' phenomenal experience, and how that affects our individual cognition. For instance, some people are more visual thinkers, others are more verbal, and there seems to be an appreciable spectrum of differences that Gary is beginning to experimentally test.




Lupyan Lab.



Twitter: @glupyan.



Related papers:

Hidden Differences in Phenomenal Experience.



Verbal interference paradigms: A systematic review investigating the role of language in cognition.





Gary mentioned Richard Feynman's Ways of Thinking video.



Gary and Andy Clark's Aeon article: Super-cooperators.




0:00 - Intro
2:36 - Words and communication
14:10 - Phenomenal variability
26:24 - Co-operating minds
38:11 - Large language models
40:40 - Neuro-symbolic AI, scale
44:43 - How LLMs have changed Gary's thoughts about language
49:26 - Meaning, grounding, and language
54:26 - Development of language
58:53 - Symbols and emergence
1:03:20 - Language evolution in the LLM era
1:08:05 - Concepts
1:11:17 - How special is language?
1:18:08 - AGI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:31:54</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 163 Ellie Pavlick: The Mind of a Language Model]]>
                </title>
                <pubDate>Mon, 20 Mar 2023 19:03:18 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1440692</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-163-ellie-pavlick-the-mind-of-a-language-model</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Ellie Pavlick runs her <a href="https://lunar.cs.brown.edu/#">Language Understanding and Representation Lab</a> at Brown University, where she studies lots of topics related to language. In AI, large language models, sometimes called foundation models, are all the rage these days, with their ability to generate convincing language, although they still make plenty of mistakes. One of the things Ellie is interested in is how these models work, what kinds of representations are being generated in them to produce the language they produce. So we discuss how she's going about studying these models. For example, probing them to see whether something symbolic-like might be implemented in the models, even though they are the deep learning neural network type, which aren't supposed to be able to work in a symbol-like manner. We also discuss whether grounding is required for language understanding - that is, whether a model that produces language well needs to connect with the real world to actually understand the text it generates. We talk about what language is for, the current limitations of large language models, how the models compare to humans, and a lot more.</p>



<ul>
<li><a href="https://lunar.cs.brown.edu/#">Language Understanding and Representation Lab</a></li>



<li>Twitter: <a href="https://twitter.com/brown_nlp?lang=en">@Brown_NLP</a></li>



<li>Related papers
<ul>
<li><a href="https://www.annualreviews.org/doi/pdf/10.1146/annurev-linguistics-031120-122924">Semantic Structure in Deep Learning</a>.</li>



<li><a href="https://aclanthology.org/2022.starsem-1.23.pdf">Pretraining on Interactions for Learning Grounded Affordance Representations</a>.</li>



<li><a href="https://openreview.net/pdf?id=gJcEM8sxHK">Mapping Language Models to Grounded Conceptual Spaces</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:34 - Will LLMs make us dumb?
9:01 - Evolution of language
17:10 - Changing views on language
22:39 - Semantics, grounding, meaning
37:40 - LLMs, humans, and prediction
41:19 - How to evaluate LLMs
51:08 - Structure, semantics, and symbols in models
1:00:08 - Dimensionality
1:02:08 - Limitations of LLMs
1:07:47 - What do linguists think?
1:14:23 - What is language for?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Ellie Pavlick runs her Language Understanding and Representation Lab at Brown University, where she studies lots of topics related to language. In AI, large language models, sometimes called foundation models, are all the rage these days, with their ability to generate convincing language, although they still make plenty of mistakes. One of the things Ellie is interested in is how these models work, what kinds of representations are being generated in them to produce the language they produce. So we discuss how she's going about studying these models. For example, probing them to see whether something symbolic-like might be implemented in the models, even though they are the deep learning neural network type, which aren't supposed to be able to work in a symbol-like manner. We also discuss whether grounding is required for language understanding - that is, whether a model that produces language well needs to connect with the real world to actually understand the text it generates. We talk about what language is for, the current limitations of large language models, how the models compare to humans, and a lot more.




Language Understanding and Representation Lab



Twitter: @Brown_NLP



Related papers

Semantic Structure in Deep Learning.



Pretraining on Interactions for Learning Grounded Affordance Representations.



Mapping Language Models to Grounded Conceptual Spaces.






0:00 - Intro
2:34 - Will LLMs make us dumb?
9:01 - Evolution of language
17:10 - Changing views on language
22:39 - Semantics, grounding, meaning
37:40 - LLMs, humans, and prediction
41:19 - How to evaluate LLMs
51:08 - Structure, semantics, and symbols in models
1:00:08 - Dimensionality
1:02:08 - Limitations of LLMs
1:07:47 - What do linguists think?
1:14:23 - What is language for?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 163 Ellie Pavlick: The Mind of a Language Model]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>







<p>Ellie Pavlick runs her <a href="https://lunar.cs.brown.edu/#">Language Understanding and Representation Lab</a> at Brown University, where she studies lots of topics related to language. In AI, large language models, sometimes called foundation models, are all the rage these days, with their ability to generate convincing language, although they still make plenty of mistakes. One of the things Ellie is interested in is how these models work, what kinds of representations are being generated in them to produce the language they produce. So we discuss how she's going about studying these models. For example, probing them to see whether something symbolic-like might be implemented in the models, even though they are the deep learning neural network type, which aren't supposed to be able to work in a symbol-like manner. We also discuss whether grounding is required for language understanding - that is, whether a model that produces language well needs to connect with the real world to actually understand the text it generates. We talk about what language is for, the current limitations of large language models, how the models compare to humans, and a lot more.</p>



<ul>
<li><a href="https://lunar.cs.brown.edu/#">Language Understanding and Representation Lab</a></li>



<li>Twitter: <a href="https://twitter.com/brown_nlp?lang=en">@Brown_NLP</a></li>



<li>Related papers
<ul>
<li><a href="https://www.annualreviews.org/doi/pdf/10.1146/annurev-linguistics-031120-122924">Semantic Structure in Deep Learning</a>.</li>



<li><a href="https://aclanthology.org/2022.starsem-1.23.pdf">Pretraining on Interactions for Learning Grounded Affordance Representations</a>.</li>



<li><a href="https://openreview.net/pdf?id=gJcEM8sxHK">Mapping Language Models to Grounded Conceptual Spaces</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:34 - Will LLMs make us dumb?
9:01 - Evolution of language
17:10 - Changing views on language
22:39 - Semantics, grounding, meaning
37:40 - LLMs, humans, and prediction
41:19 - How to evaluate LLMs
51:08 - Structure, semantics, and symbols in models
1:00:08 - Dimensionality
1:02:08 - Limitations of LLMs
1:07:47 - What do linguists think?
1:14:23 - What is language for?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1e12661b-9ae9-48b1-ae88-d1d1cec1f0c9-163-Elllie-Pavlick-Snapshot-Public.mp3" length="78609455"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Ellie Pavlick runs her Language Understanding and Representation Lab at Brown University, where she studies lots of topics related to language. In AI, large language models, sometimes called foundation models, are all the rage these days, with their ability to generate convincing language, although they still make plenty of mistakes. One of the things Ellie is interested in is how these models work, what kinds of representations are being generated in them to produce the language they produce. So we discuss how she's going about studying these models. For example, probing them to see whether something symbolic-like might be implemented in the models, even though they are the deep learning neural network type, which aren't supposed to be able to work in a symbol-like manner. We also discuss whether grounding is required for language understanding - that is, whether a model that produces language well needs to connect with the real world to actually understand the text it generates. We talk about what language is for, the current limitations of large language models, how the models compare to humans, and a lot more.




Language Understanding and Representation Lab



Twitter: @Brown_NLP



Related papers

Semantic Structure in Deep Learning.



Pretraining on Interactions for Learning Grounded Affordance Representations.



Mapping Language Models to Grounded Conceptual Spaces.






0:00 - Intro
2:34 - Will LLMs make us dumb?
9:01 - Evolution of language
17:10 - Changing views on language
22:39 - Semantics, grounding, meaning
37:40 - LLMs, humans, and prediction
41:19 - How to evaluate LLMs
51:08 - Structure, semantics, and symbols in models
1:00:08 - Dimensionality
1:02:08 - Limitations of LLMs
1:07:47 - What do linguists think?
1:14:23 - What is language for?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:21:34</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 162 Earl K. Miller: Thoughts are an Emergent Property]]>
                </title>
                <pubDate>Wed, 08 Mar 2023 16:44:21 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1431476</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-162-earl-miller-thoughts-are-an-emergent-property</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Earl Miller runs the Miller Lab at MIT, where he studies how our brains carry out our executive functions, like working memory, attention, and decision-making. In particular he is interested in the role of the prefrontal cortex and how it coordinates with other brain areas to carry out these functions. During this episode, we talk broadly about how neuroscience has changed during Earl's career, and how his own thoughts have changed. One thing we focus on is the increasing appreciation of brain oscillations for our cognition.</p>



<p>Recently on BI we've discussed oscillations quite a bit. In <a href="https://braininspired.co/podcast/153/">episode 153</a>, Carolyn Dicey-Jennings discussed her philosophical ideas relating attention to the notion of the self, and she leans a lot on Earl's research to make that argument. In <a href="https://braininspired.co/podcast/160/">episode 160</a>, Ole Jensen discussed his work in humans showing that low frequency oscillations exert a top-down control on incoming sensory stimuli, and this is directly in agreement with Earl's work over many years in nonhuman primates. So we continue that discussion relating low-frequency oscillations to executive control. We also discuss a new concept Earl has developed called spatial computing, which is an account of how brain oscillations can dictate where in various brain areas neural activity will be on or off, and hence contribute or not to ongoing mental function. We also discuss working memory in particular, and a host of related topics.</p>



<ul>
<li><a href="http://ekmillerlab.mit.edu/">Miller lab.</a></li>



<li>Twitter: <a href="https://twitter.com/MillerLabMIT">@MillerLabMIT</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2013/03/Miller-Cohen-20011.pdf">An integrative theory of prefrontal cortex function. Annual Review of Neuroscience</a>.</li>



<li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2022/12/Buchman-and-Miller-JOCN-2023.pdf">Working Memory Is Complex and Dynamic, Like Your Thoughts</a>.</li>



<li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2022/02/Traveling-Waves-PLOS-Comp-Bio-2022.pdf">Traveling waves in the prefrontal cortex during working memory</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
6:22 - Evolution of Earl's thinking
14:58 - Role of the prefrontal cortex
25:21 - Spatial computing
32:51 - Homunculus problem
35:34 - Self
37:40 - Dimensionality and thought
46:13 - Reductionism
47:38 - Working memory and capacity
1:01:45 - Capacity as a principle
1:05:44 - Silent synapses
1:10:16 - Subspaces in dynamics</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Earl Miller runs the Miller Lab at MIT, where he studies how our brains carry out our executive functions, like working memory, attention, and decision-making. In particular he is interested in the role of the prefrontal cortex and how it coordinates with other brain areas to carry out these functions. During this episode, we talk broadly about how neuroscience has changed during Earl's career, and how his own thoughts have changed. One thing we focus on is the increasing appreciation of brain oscillations for our cognition.



Recently on BI we've discussed oscillations quite a bit. In episode 153, Carolyn Dicey-Jennings discussed her philosophical ideas relating attention to the notion of the self, and she leans a lot on Earl's research to make that argument. In episode 160, Ole Jensen discussed his work in humans showing that low frequency oscillations exert a top-down control on incoming sensory stimuli, and this is directly in agreement with Earl's work over many years in nonhuman primates. So we continue that discussion relating low-frequency oscillations to executive control. We also discuss a new concept Earl has developed called spatial computing, which is an account of how brain oscillations can dictate where in various brain areas neural activity will be on or off, and hence contribute or not to ongoing mental function. We also discuss working memory in particular, and a host of related topics.




Miller lab.



Twitter: @MillerLabMIT.



Related papers:

An integrative theory of prefrontal cortex function. Annual Review of Neuroscience.



Working Memory Is Complex and Dynamic, Like Your Thoughts.



Traveling waves in the prefrontal cortex during working memory.






0:00 - Intro
6:22 - Evolution of Earl's thinking
14:58 - Role of the prefrontal cortex
25:21 - Spatial computing
32:51 - Homunculus problem
35:34 - Self
37:40 - Dimensionality and thought
46:13 - Reductionism
47:38 - Working memory and capacity
1:01:45 - Capacity as a principle
1:05:44 - Silent synapses
1:10:16 - Subspaces in dynamics]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 162 Earl K. Miller: Thoughts are an Emergent Property]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Earl Miller runs the Miller Lab at MIT, where he studies how our brains carry out our executive functions, like working memory, attention, and decision-making. In particular he is interested in the role of the prefrontal cortex and how it coordinates with other brain areas to carry out these functions. During this episode, we talk broadly about how neuroscience has changed during Earl's career, and how his own thoughts have changed. One thing we focus on is the increasing appreciation of brain oscillations for our cognition.</p>



<p>Recently on BI we've discussed oscillations quite a bit. In <a href="https://braininspired.co/podcast/153/">episode 153</a>, Carolyn Dicey-Jennings discussed her philosophical ideas relating attention to the notion of the self, and she leans a lot on Earl's research to make that argument. In <a href="https://braininspired.co/podcast/160/">episode 160</a>, Ole Jensen discussed his work in humans showing that low frequency oscillations exert a top-down control on incoming sensory stimuli, and this is directly in agreement with Earl's work over many years in nonhuman primates. So we continue that discussion relating low-frequency oscillations to executive control. We also discuss a new concept Earl has developed called spatial computing, which is an account of how brain oscillations can dictate where in various brain areas neural activity will be on or off, and hence contribute or not to ongoing mental function. We also discuss working memory in particular, and a host of related topics.</p>



<ul>
<li><a href="http://ekmillerlab.mit.edu/">Miller lab.</a></li>



<li>Twitter: <a href="https://twitter.com/MillerLabMIT">@MillerLabMIT</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2013/03/Miller-Cohen-20011.pdf">An integrative theory of prefrontal cortex function. Annual Review of Neuroscience</a>.</li>



<li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2022/12/Buchman-and-Miller-JOCN-2023.pdf">Working Memory Is Complex and Dynamic, Like Your Thoughts</a>.</li>



<li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2022/02/Traveling-Waves-PLOS-Comp-Bio-2022.pdf">Traveling waves in the prefrontal cortex during working memory</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
6:22 - Evolution of Earl's thinking
14:58 - Role of the prefrontal cortex
25:21 - Spatial computing
32:51 - Homunculus problem
35:34 - Self
37:40 - Dimensionality and thought
46:13 - Reductionism
47:38 - Working memory and capacity
1:01:45 - Capacity as a principle
1:05:44 - Silent synapses
1:10:16 - Subspaces in dynamics</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/14291e5e-63e1-43f4-9a84-fbeee29d1132-162-Earl-Miller.mp3" length="80413787"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Earl Miller runs the Miller Lab at MIT, where he studies how our brains carry out our executive functions, like working memory, attention, and decision-making. In particular he is interested in the role of the prefrontal cortex and how it coordinates with other brain areas to carry out these functions. During this episode, we talk broadly about how neuroscience has changed during Earl's career, and how his own thoughts have changed. One thing we focus on is the increasing appreciation of brain oscillations for our cognition.



Recently on BI we've discussed oscillations quite a bit. In episode 153, Carolyn Dicey-Jennings discussed her philosophical ideas relating attention to the notion of the self, and she leans a lot on Earl's research to make that argument. In episode 160, Ole Jensen discussed his work in humans showing that low frequency oscillations exert a top-down control on incoming sensory stimuli, and this is directly in agreement with Earl's work over many years in nonhuman primates. So we continue that discussion relating low-frequency oscillations to executive control. We also discuss a new concept Earl has developed called spatial computing, which is an account of how brain oscillations can dictate where in various brain areas neural activity will be on or off, and hence contribute or not to ongoing mental function. We also discuss working memory in particular, and a host of related topics.




Miller lab.



Twitter: @MillerLabMIT.



Related papers:

An integrative theory of prefrontal cortex function. Annual Review of Neuroscience.



Working Memory Is Complex and Dynamic, Like Your Thoughts.



Traveling waves in the prefrontal cortex during working memory.






0:00 - Intro
6:22 - Evolution of Earl's thinking
14:58 - Role of the prefrontal cortex
25:21 - Spatial computing
32:51 - Homunculus problem
35:34 - Self
37:40 - Dimensionality and thought
46:13 - Reductionism
47:38 - Working memory and capacity
1:01:45 - Capacity as a principle
1:05:44 - Silent synapses
1:10:16 - Subspaces in dynamics]]>
                </itunes:summary>
                                                                            <itunes:duration>01:23:27</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 161 Hugo Spiers: Navigation and Spatial Cognition]]>
                </title>
                <pubDate>Fri, 24 Feb 2023 15:32:08 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1423670</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-161-hugo-spiers-navigation-and-spatial-cognition</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>









<p>Hugo Spiers runs the Spiers Lab at University College London. In general Hugo is interested in understanding spatial cognition, like navigation, in relation to other processes like planning and goal-related behavior, and how brain areas like the hippocampus and prefrontal cortex coordinate these cognitive functions. So, in this episode, we discuss a range of his research and thoughts around those topics. You may have heard about the studies he's been involved with for years, regarding London taxi drivers and how their hippocampus changes as a result of their grueling efforts to memorize how to best navigate London. We talk about that, we discuss the concept of a schema, which is roughly an abstracted form of knowledge that helps you know how to behave in different environments. Probably the most common example is that we all have a schema for eating at a restaurant, independent of which restaurant we visit, we know about servers, and menus, and so on. Hugo is interested in spatial schemas, for things like navigating a new city you haven't visited. Hugo describes his work using reinforcement learning methods to compare how humans and animals solve navigation tasks. And finally we talk about the video game Hugo has been using to collect vast amounts of data related to navigation, to answer questions like how our navigation ability changes over our lifetimes, the different factors that seem to matter more for our navigation skills, and so on.</p>



<ul>
<li><a href="https://spierslab.com/">Spiers Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/hugospiers">@hugospiers</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.biorxiv.org/content/10.1101/2020.09.26.314815v6">Predictive maps in rats and humans for spatial navigation</a>.</li>



<li><a href="https://www.researchgate.net/publication/365657477_From_cognitive_maps_to_spatial_schemas">From cognitive maps to spatial schemas</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/full/10.1002/hipo.23395">London taxi drivers: A review of neurocognitive studies and an exploration of how they build their cognitive map of London</a>.</li>



<li><a href="https://hal.science/hal-03472319/file/SpiersTICS2022.pdf">Explaining World-Wide Variation in Navigation Ability from Millions of People: Citizen Science Project Sea Hero Quest</a>.</li>
</ul>
</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









Hugo Spiers runs the Spiers Lab at University College London. In general Hugo is interested in understanding spatial cognition, like navigation, in relation to other processes like planning and goal-related behavior, and how brain areas like the hippocampus and prefrontal cortex coordinate these cognitive functions. So, in this episode, we discuss a range of his research and thoughts around those topics. You may have heard about the studies he's been involved with for years, regarding London taxi drivers and how their hippocampus changes as a result of their grueling efforts to memorize how to best navigate London. We talk about that, we discuss the concept of a schema, which is roughly an abstracted form of knowledge that helps you know how to behave in different environments. Probably the most common example is that we all have a schema for eating at a restaurant, independent of which restaurant we visit, we know about servers, and menus, and so on. Hugo is interested in spatial schemas, for things like navigating a new city you haven't visited. Hugo describes his work using reinforcement learning methods to compare how humans and animals solve navigation tasks. And finally we talk about the video game Hugo has been using to collect vast amounts of data related to navigation, to answer questions like how our navigation ability changes over our lifetimes, the different factors that seem to matter more for our navigation skills, and so on.




Spiers Lab.



Twitter: @hugospiers.



Related papers

Predictive maps in rats and humans for spatial navigation.



From cognitive maps to spatial schemas.



London taxi drivers: A review of neurocognitive studies and an exploration of how they build their cognitive map of London.



Explaining World-Wide Variation in Navigation Ability from Millions of People: Citizen Science Project Sea Hero Quest.


]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 161 Hugo Spiers: Navigation and Spatial Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>









<p>Hugo Spiers runs the Spiers Lab at University College London. In general Hugo is interested in understanding spatial cognition, like navigation, in relation to other processes like planning and goal-related behavior, and how brain areas like the hippocampus and prefrontal cortex coordinate these cognitive functions. So, in this episode, we discuss a range of his research and thoughts around those topics. You may have heard about the studies he's been involved with for years, regarding London taxi drivers and how their hippocampus changes as a result of their grueling efforts to memorize how to best navigate London. We talk about that, we discuss the concept of a schema, which is roughly an abstracted form of knowledge that helps you know how to behave in different environments. Probably the most common example is that we all have a schema for eating at a restaurant, independent of which restaurant we visit, we know about servers, and menus, and so on. Hugo is interested in spatial schemas, for things like navigating a new city you haven't visited. Hugo describes his work using reinforcement learning methods to compare how humans and animals solve navigation tasks. And finally we talk about the video game Hugo has been using to collect vast amounts of data related to navigation, to answer questions like how our navigation ability changes over our lifetimes, the different factors that seem to matter more for our navigation skills, and so on.</p>



<ul>
<li><a href="https://spierslab.com/">Spiers Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/hugospiers">@hugospiers</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.biorxiv.org/content/10.1101/2020.09.26.314815v6">Predictive maps in rats and humans for spatial navigation</a>.</li>



<li><a href="https://www.researchgate.net/publication/365657477_From_cognitive_maps_to_spatial_schemas">From cognitive maps to spatial schemas</a>.</li>



<li><a href="https://onlinelibrary.wiley.com/doi/full/10.1002/hipo.23395">London taxi drivers: A review of neurocognitive studies and an exploration of how they build their cognitive map of London</a>.</li>



<li><a href="https://hal.science/hal-03472319/file/SpiersTICS2022.pdf">Explaining World-Wide Variation in Navigation Ability from Millions of People: Citizen Science Project Sea Hero Quest</a>.</li>
</ul>
</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/4143c5fe-e9a1-4613-b205-aa0a49f31d9e-161-Hugo-Spiers-public.mp3" length="91146289"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience









Hugo Spiers runs the Spiers Lab at University College London. In general Hugo is interested in understanding spatial cognition, like navigation, in relation to other processes like planning and goal-related behavior, and how brain areas like the hippocampus and prefrontal cortex coordinate these cognitive functions. So, in this episode, we discuss a range of his research and thoughts around those topics. You may have heard about the studies he's been involved with for years, regarding London taxi drivers and how their hippocampus changes as a result of their grueling efforts to memorize how to best navigate London. We talk about that, we discuss the concept of a schema, which is roughly an abstracted form of knowledge that helps you know how to behave in different environments. Probably the most common example is that we all have a schema for eating at a restaurant, independent of which restaurant we visit, we know about servers, and menus, and so on. Hugo is interested in spatial schemas, for things like navigating a new city you haven't visited. Hugo describes his work using reinforcement learning methods to compare how humans and animals solve navigation tasks. And finally we talk about the video game Hugo has been using to collect vast amounts of data related to navigation, to answer questions like how our navigation ability changes over our lifetimes, the different factors that seem to matter more for our navigation skills, and so on.




Spiers Lab.



Twitter: @hugospiers.



Related papers

Predictive maps in rats and humans for spatial navigation.



From cognitive maps to spatial schemas.



London taxi drivers: A review of neurocognitive studies and an exploration of how they build their cognitive map of London.



Explaining World-Wide Variation in Navigation Ability from Millions of People: Citizen Science Project Sea Hero Quest.


]]>
                </itunes:summary>
                                                                            <itunes:duration>01:34:38</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 160 Ole Jensen: Rhythms of Cognition]]>
                </title>
                <pubDate>Tue, 07 Feb 2023 16:08:37 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1404277</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-160-ole-jensen-rhythms-of-cognition</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Ole Jensen is co-director of the Centre for Human Brain Health at University of Birmingham, where he runs his Neuronal Oscillations Group lab. Ole is interested in how the oscillations in our brains affect our cognition by helping to shape the spiking patterns of neurons, and by helping to allocate resources to parts of our brains that are relevant for whatever ongoing behaviors we're performing in different contexts. People have been studying oscillations for decades, finding that different frequencies of oscillations have been linked to a bunch of different cognitive functions. Some of what we discuss today is Ole's work on alpha oscillations, which are around 10 hertz, so 10 oscillations per second. The overarching story is that alpha oscillations are thought to inhibit or disrupt processing in brain areas that aren't needed during a given behavior. And therefore by disrupting everything that's not needed, resources are allocated to the brain areas that are needed. We discuss his work in the vein on attention - you may remember the episode with Carolyn Dicey-Jennings, and her ideas about how findings like Ole's are evidence we all have selves. We also talk about the role of alpha rhythms for working memory, for moving our eyes, and for previewing what we're about to look at before we move our eyes, and more broadly we discuss the role of oscillations in cognition in general, and of course what this might mean for developing better artificial intelligence.</p>



<ul>
<li><a href="https://neuosc.com/">The Neuronal Oscillations Group</a>.
<ul>
<li></li>
</ul>
</li>



<li>Twitter: <a href="https://twitter.com/neuosc">@neuosc</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.frontiersin.org/articles/10.3389/fnhum.2010.00186/full">Shaping functional architecture by oscillatory alpha activity: gating by inhibition</a></li>



<li><a href="https://www.jneurosci.org/content/37/15/4117">FEF-Controlled Alpha Delay Activity Precedes Stimulus-Induced Gamma-Band Activity in Visual Cortex</a></li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(13)00231-6">The theta-gamma neural code</a></li>



<li><a href="https://www.biorxiv.org/content/biorxiv/early/2021/03/26/2021.03.25.436919.full.pdf">A pipelining mechanism supporting previewing during visual exploration and reading.</a></li>



<li><a href="https://elifesciences.org/articles/39061">Specific lexico-semantic predictions are associated with unique spatial and temporal patterns of neural activity.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:58 - Oscillations import over the years
5:51 - Oscillations big picture
17:62 - Oscillations vs. traveling waves
22:00 - Oscillations and algorithms
28:53 - Alpha oscillations and working memory
44:46 - Alpha as the controller
48:55 - Frequency tagging
52:49 - Timing of attention
57:41 - Pipelining neural processing
1:03:38 - Previewing during reading
1:15:50 - Previewing, prediction, and large language models
1:24:27 - Dyslexia</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Ole Jensen is co-director of the Centre for Human Brain Health at University of Birmingham, where he runs his Neuronal Oscillations Group lab. Ole is interested in how the oscillations in our brains affect our cognition by helping to shape the spiking patterns of neurons, and by helping to allocate resources to parts of our brains that are relevant for whatever ongoing behaviors we're performing in different contexts. People have been studying oscillations for decades, finding that different frequencies of oscillations have been linked to a bunch of different cognitive functions. Some of what we discuss today is Ole's work on alpha oscillations, which are around 10 hertz, so 10 oscillations per second. The overarching story is that alpha oscillations are thought to inhibit or disrupt processing in brain areas that aren't needed during a given behavior. And therefore by disrupting everything that's not needed, resources are allocated to the brain areas that are needed. We discuss his work in the vein on attention - you may remember the episode with Carolyn Dicey-Jennings, and her ideas about how findings like Ole's are evidence we all have selves. We also talk about the role of alpha rhythms for working memory, for moving our eyes, and for previewing what we're about to look at before we move our eyes, and more broadly we discuss the role of oscillations in cognition in general, and of course what this might mean for developing better artificial intelligence.




The Neuronal Oscillations Group.







Twitter: @neuosc.



Related papers

Shaping functional architecture by oscillatory alpha activity: gating by inhibition



FEF-Controlled Alpha Delay Activity Precedes Stimulus-Induced Gamma-Band Activity in Visual Cortex



The theta-gamma neural code



A pipelining mechanism supporting previewing during visual exploration and reading.



Specific lexico-semantic predictions are associated with unique spatial and temporal patterns of neural activity.






0:00 - Intro
2:58 - Oscillations import over the years
5:51 - Oscillations big picture
17:62 - Oscillations vs. traveling waves
22:00 - Oscillations and algorithms
28:53 - Alpha oscillations and working memory
44:46 - Alpha as the controller
48:55 - Frequency tagging
52:49 - Timing of attention
57:41 - Pipelining neural processing
1:03:38 - Previewing during reading
1:15:50 - Previewing, prediction, and large language models
1:24:27 - Dyslexia]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 160 Ole Jensen: Rhythms of Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Ole Jensen is co-director of the Centre for Human Brain Health at University of Birmingham, where he runs his Neuronal Oscillations Group lab. Ole is interested in how the oscillations in our brains affect our cognition by helping to shape the spiking patterns of neurons, and by helping to allocate resources to parts of our brains that are relevant for whatever ongoing behaviors we're performing in different contexts. People have been studying oscillations for decades, finding that different frequencies of oscillations have been linked to a bunch of different cognitive functions. Some of what we discuss today is Ole's work on alpha oscillations, which are around 10 hertz, so 10 oscillations per second. The overarching story is that alpha oscillations are thought to inhibit or disrupt processing in brain areas that aren't needed during a given behavior. And therefore by disrupting everything that's not needed, resources are allocated to the brain areas that are needed. We discuss his work in the vein on attention - you may remember the episode with Carolyn Dicey-Jennings, and her ideas about how findings like Ole's are evidence we all have selves. We also talk about the role of alpha rhythms for working memory, for moving our eyes, and for previewing what we're about to look at before we move our eyes, and more broadly we discuss the role of oscillations in cognition in general, and of course what this might mean for developing better artificial intelligence.</p>



<ul>
<li><a href="https://neuosc.com/">The Neuronal Oscillations Group</a>.
<ul>
<li></li>
</ul>
</li>



<li>Twitter: <a href="https://twitter.com/neuosc">@neuosc</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.frontiersin.org/articles/10.3389/fnhum.2010.00186/full">Shaping functional architecture by oscillatory alpha activity: gating by inhibition</a></li>



<li><a href="https://www.jneurosci.org/content/37/15/4117">FEF-Controlled Alpha Delay Activity Precedes Stimulus-Induced Gamma-Band Activity in Visual Cortex</a></li>



<li><a href="https://www.cell.com/neuron/fulltext/S0896-6273(13)00231-6">The theta-gamma neural code</a></li>



<li><a href="https://www.biorxiv.org/content/biorxiv/early/2021/03/26/2021.03.25.436919.full.pdf">A pipelining mechanism supporting previewing during visual exploration and reading.</a></li>



<li><a href="https://elifesciences.org/articles/39061">Specific lexico-semantic predictions are associated with unique spatial and temporal patterns of neural activity.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:58 - Oscillations import over the years
5:51 - Oscillations big picture
17:62 - Oscillations vs. traveling waves
22:00 - Oscillations and algorithms
28:53 - Alpha oscillations and working memory
44:46 - Alpha as the controller
48:55 - Frequency tagging
52:49 - Timing of attention
57:41 - Pipelining neural processing
1:03:38 - Previewing during reading
1:15:50 - Previewing, prediction, and large language models
1:24:27 - Dyslexia</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/16471399-dcd0-4739-ba28-45aead7373c5-160-Ole-Jensen-public.mp3" length="85409466"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Ole Jensen is co-director of the Centre for Human Brain Health at University of Birmingham, where he runs his Neuronal Oscillations Group lab. Ole is interested in how the oscillations in our brains affect our cognition by helping to shape the spiking patterns of neurons, and by helping to allocate resources to parts of our brains that are relevant for whatever ongoing behaviors we're performing in different contexts. People have been studying oscillations for decades, finding that different frequencies of oscillations have been linked to a bunch of different cognitive functions. Some of what we discuss today is Ole's work on alpha oscillations, which are around 10 hertz, so 10 oscillations per second. The overarching story is that alpha oscillations are thought to inhibit or disrupt processing in brain areas that aren't needed during a given behavior. And therefore by disrupting everything that's not needed, resources are allocated to the brain areas that are needed. We discuss his work in the vein on attention - you may remember the episode with Carolyn Dicey-Jennings, and her ideas about how findings like Ole's are evidence we all have selves. We also talk about the role of alpha rhythms for working memory, for moving our eyes, and for previewing what we're about to look at before we move our eyes, and more broadly we discuss the role of oscillations in cognition in general, and of course what this might mean for developing better artificial intelligence.




The Neuronal Oscillations Group.







Twitter: @neuosc.



Related papers

Shaping functional architecture by oscillatory alpha activity: gating by inhibition



FEF-Controlled Alpha Delay Activity Precedes Stimulus-Induced Gamma-Band Activity in Visual Cortex



The theta-gamma neural code



A pipelining mechanism supporting previewing during visual exploration and reading.



Specific lexico-semantic predictions are associated with unique spatial and temporal patterns of neural activity.






0:00 - Intro
2:58 - Oscillations import over the years
5:51 - Oscillations big picture
17:62 - Oscillations vs. traveling waves
22:00 - Oscillations and algorithms
28:53 - Alpha oscillations and working memory
44:46 - Alpha as the controller
48:55 - Frequency tagging
52:49 - Timing of attention
57:41 - Pipelining neural processing
1:03:38 - Previewing during reading
1:15:50 - Previewing, prediction, and large language models
1:24:27 - Dyslexia]]>
                </itunes:summary>
                                                                            <itunes:duration>01:28:39</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 159 Chris Summerfield: Natural General Intelligence]]>
                </title>
                <pubDate>Thu, 26 Jan 2023 23:18:14 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1392114</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-159-chris-summerfield-natural-general-intelligence</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Chris Summerfield runs the Human Information Processing Lab at University of Oxford, and he's a research scientist at Deepmind. You may remember him from <a href="https://braininspired.co/podcast/95/">episode 95 with Sam Gershman</a>, when we discussed ideas around the usefulness of neuroscience and psychology for AI. Since then, Chris has released his book, <a href="https://amzn.to/3GIY9tO">Natural General Intelligence: How understanding the brain can help us build AI</a>. In the book, Chris makes the case that inspiration and communication between the cognitive sciences and AI is hindered by the different languages each field speaks. But in reality, there has always been and still is a lot of overlap and convergence about ideas of computation and intelligence, and he illustrates this using tons of historical and modern examples.</p>





<ul>
<li><a href="https://humaninformationprocessing.com/">Human Information Processing Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/summerfieldlab">@summerfieldlab</a>.</li>



<li>Book: <a href="https://amzn.to/3GIY9tO">Natural General Intelligence: How understanding the brain can help us build AI</a>.</li>



<li>Other books mentioned:
<ul>
<li><a href="https://amzn.to/3kC1FPu">Are We Smart Enough to Know How Smart Animals Are?</a> by Frans de Waal</li>



<li><a href="https://amzn.to/3ZQi96x">The Mind is Flat</a> by Nick Chater.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:20 - Natural General Intelligence
8:05 - AI and Neuro interaction
21:42 - How to build AI
25:54 - Umwelts and affordances
32:07 - Different kind of intelligence
39:16 - Ecological validity and AI
48:30 - Is reward enough?
1:05:14 - Beyond brains
1:15:10 - Large language models and brains</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Chris Summerfield runs the Human Information Processing Lab at University of Oxford, and he's a research scientist at Deepmind. You may remember him from episode 95 with Sam Gershman, when we discussed ideas around the usefulness of neuroscience and psychology for AI. Since then, Chris has released his book, Natural General Intelligence: How understanding the brain can help us build AI. In the book, Chris makes the case that inspiration and communication between the cognitive sciences and AI is hindered by the different languages each field speaks. But in reality, there has always been and still is a lot of overlap and convergence about ideas of computation and intelligence, and he illustrates this using tons of historical and modern examples.






Human Information Processing Lab.



Twitter: @summerfieldlab.



Book: Natural General Intelligence: How understanding the brain can help us build AI.



Other books mentioned:

Are We Smart Enough to Know How Smart Animals Are? by Frans de Waal



The Mind is Flat by Nick Chater.






0:00 - Intro
2:20 - Natural General Intelligence
8:05 - AI and Neuro interaction
21:42 - How to build AI
25:54 - Umwelts and affordances
32:07 - Different kind of intelligence
39:16 - Ecological validity and AI
48:30 - Is reward enough?
1:05:14 - Beyond brains
1:15:10 - Large language models and brains]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 159 Chris Summerfield: Natural General Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Chris Summerfield runs the Human Information Processing Lab at University of Oxford, and he's a research scientist at Deepmind. You may remember him from <a href="https://braininspired.co/podcast/95/">episode 95 with Sam Gershman</a>, when we discussed ideas around the usefulness of neuroscience and psychology for AI. Since then, Chris has released his book, <a href="https://amzn.to/3GIY9tO">Natural General Intelligence: How understanding the brain can help us build AI</a>. In the book, Chris makes the case that inspiration and communication between the cognitive sciences and AI is hindered by the different languages each field speaks. But in reality, there has always been and still is a lot of overlap and convergence about ideas of computation and intelligence, and he illustrates this using tons of historical and modern examples.</p>





<ul>
<li><a href="https://humaninformationprocessing.com/">Human Information Processing Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/summerfieldlab">@summerfieldlab</a>.</li>



<li>Book: <a href="https://amzn.to/3GIY9tO">Natural General Intelligence: How understanding the brain can help us build AI</a>.</li>



<li>Other books mentioned:
<ul>
<li><a href="https://amzn.to/3kC1FPu">Are We Smart Enough to Know How Smart Animals Are?</a> by Frans de Waal</li>



<li><a href="https://amzn.to/3ZQi96x">The Mind is Flat</a> by Nick Chater.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
2:20 - Natural General Intelligence
8:05 - AI and Neuro interaction
21:42 - How to build AI
25:54 - Umwelts and affordances
32:07 - Different kind of intelligence
39:16 - Ecological validity and AI
48:30 - Is reward enough?
1:05:14 - Beyond brains
1:15:10 - Large language models and brains</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/9c600425-be8d-49b5-9274-7c92175ab439-159-Chris-Summerfield-public.mp3" length="85633753"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Chris Summerfield runs the Human Information Processing Lab at University of Oxford, and he's a research scientist at Deepmind. You may remember him from episode 95 with Sam Gershman, when we discussed ideas around the usefulness of neuroscience and psychology for AI. Since then, Chris has released his book, Natural General Intelligence: How understanding the brain can help us build AI. In the book, Chris makes the case that inspiration and communication between the cognitive sciences and AI is hindered by the different languages each field speaks. But in reality, there has always been and still is a lot of overlap and convergence about ideas of computation and intelligence, and he illustrates this using tons of historical and modern examples.






Human Information Processing Lab.



Twitter: @summerfieldlab.



Book: Natural General Intelligence: How understanding the brain can help us build AI.



Other books mentioned:

Are We Smart Enough to Know How Smart Animals Are? by Frans de Waal



The Mind is Flat by Nick Chater.






0:00 - Intro
2:20 - Natural General Intelligence
8:05 - AI and Neuro interaction
21:42 - How to build AI
25:54 - Umwelts and affordances
32:07 - Different kind of intelligence
39:16 - Ecological validity and AI
48:30 - Is reward enough?
1:05:14 - Beyond brains
1:15:10 - Large language models and brains]]>
                </itunes:summary>
                                                                            <itunes:duration>01:28:53</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 158 Paul Rosenbloom: Cognitive Architectures]]>
                </title>
                <pubDate>Mon, 16 Jan 2023 13:50:23 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1378253</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-158-paul-rosenbloom-cognitive-architectures</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Paul Rosenbloom is Professor Emeritus of Computer Science at the University of Southern California. In the early 1980s, Paul, along with John Laird and the early AI pioneer Alan Newell, developed one of the earliest and best known cognitive architectures called SOAR. A cognitive architecture, as Paul defines it, is a model of the fixed structures and processes underlying minds, and in Paul's case the human mind. And SOAR was aimed at generating general intelligence. He doesn't work on SOAR any more, although SOAR is still alive and well in the hands of his old partner John Laird. He did go on to develop another cognitive architecture, called Sigma, and in the intervening years between those projects, among other things Paul stepped back and explored how our various scientific domains are related, and how computing itself should be considered a great scientific domain. That's in his book <a href="https://amzn.to/3WojDlL">On Computing: The Fourth Great Scientific Domain</a>.</p>





<p>He also helped develop the Common Model of Cognition, which isn't a cognitive architecture itself, but instead a theoretical model meant to generate consensus regarding the minimal components for a human-like mind. The idea is roughly to create a shared language and framework among cognitive architecture researchers, so that whatever cognitive architecture you work on, you have a basis to compare it to, and can communicate effectively among your peers.</p>



<p>All of what I just said, and much of what we discuss, can be found in Paul's memoir, <a href="https://www.dropbox.com/s/5o39z7gj2n1utg3/The%20Search%20for%20Insight%207-31-22.pdf?dl=0" target="_blank" rel="noreferrer noopener">In Search of Insight: My Life as an Architectural Explorer</a>.</p>



<ul>
<li><a href="https://sites.usc.edu/rosenbloom/">Paul's website</a>.</li>



<li>Related papers
<ul>
<li>Working memoir: <a href="https://www.dropbox.com/s/5o39z7gj2n1utg3/The%20Search%20for%20Insight%207-31-22.pdf?dl=0" target="_blank" rel="noreferrer noopener">In Search of Insight: My Life as an Architectural Explorer</a>.</li>



<li>Book: <a href="https://amzn.to/3WojDlL">On Computing: The Fourth Great Scientific Domain</a>.</li>



<li><a href="https://soar.eecs.umich.edu/pubs/Laird_etal_StandardModel_AImag_2018.pdf">A Standard Model of the Mind: Toward a Common Computational Framework across Artificial Intelligence, Cognitive Science, Neuroscience, and Robotics</a>.</li>



<li><a href="https://soar.eecs.umich.edu/pubs/stocco2021connectome.pdf">Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains</a>.</li>



<li><a href="https://ojs.library.carleton.ca/index.php/cmcb/index">Common Model of Cognition Bulletin</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:26 - A career of exploration
7:00 - Allen Newell
14:47 - Relational model and dichotomic maps
24:22 - Cognitive architectures
28:31 - SOAR cognitive architecture
41:14 - Sigma cognitive architecture
43:58 - SOAR vs. Sigma
53:06 - Cognitive architecture community
55:31 - Common model of cognition
1:11:13 - What's missing from the common model
1:17:48 - Brains vs. cognitive architectures
1:21:22 - Mapping the common model onto the brain
1:24:50 - Deep learning
1:30:23 - AGI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Paul Rosenbloom is Professor Emeritus of Computer Science at the University of Southern California. In the early 1980s, Paul, along with John Laird and the early AI pioneer Allen Newell, developed one of the earliest and best known cognitive architectures, called SOAR. A cognitive architecture, as Paul defines it, is a model of the fixed structures and processes underlying minds, and in Paul's case the human mind. And SOAR was aimed at generating general intelligence. He doesn't work on SOAR any more, although SOAR is still alive and well in the hands of his old partner John Laird. He did go on to develop another cognitive architecture, called Sigma, and in the intervening years between those projects, among other things Paul stepped back and explored how our various scientific domains are related, and how computing itself should be considered a great scientific domain. That's in his book On Computing: The Fourth Great Scientific Domain.





He also helped develop the Common Model of Cognition, which isn't a cognitive architecture itself, but instead a theoretical model meant to generate consensus regarding the minimal components for a human-like mind. The idea is roughly to create a shared language and framework among cognitive architecture researchers, so that whatever cognitive architecture you work on, you have a basis to compare it to, and can communicate effectively among your peers.



All of what I just said, and much of what we discuss, can be found in Paul's memoir, In Search of Insight: My Life as an Architectural Explorer.




Paul's website.



Related papers

Working memoir: In Search of Insight: My Life as an Architectural Explorer.



Book: On Computing: The Fourth Great Scientific Domain.



A Standard Model of the Mind: Toward a Common Computational Framework across Artificial Intelligence, Cognitive Science, Neuroscience, and Robotics.



Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains.



Common Model of Cognition Bulletin.






0:00 - Intro
3:26 - A career of exploration
7:00 - Allen Newell
14:47 - Relational model and dichotomic maps
24:22 - Cognitive architectures
28:31 - SOAR cognitive architecture
41:14 - Sigma cognitive architecture
43:58 - SOAR vs. Sigma
53:06 - Cognitive architecture community
55:31 - Common model of cognition
1:11:13 - What's missing from the common model
1:17:48 - Brains vs. cognitive architectures
1:21:22 - Mapping the common model onto the brain
1:24:50 - Deep learning
1:30:23 - AGI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 158 Paul Rosenbloom: Cognitive Architectures]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Paul Rosenbloom is Professor Emeritus of Computer Science at the University of Southern California. In the early 1980s, Paul, along with John Laird and the early AI pioneer Allen Newell, developed one of the earliest and best known cognitive architectures, called SOAR. A cognitive architecture, as Paul defines it, is a model of the fixed structures and processes underlying minds, and in Paul's case the human mind. And SOAR was aimed at generating general intelligence. He doesn't work on SOAR any more, although SOAR is still alive and well in the hands of his old partner John Laird. He did go on to develop another cognitive architecture, called Sigma, and in the intervening years between those projects, among other things Paul stepped back and explored how our various scientific domains are related, and how computing itself should be considered a great scientific domain. That's in his book <a href="https://amzn.to/3WojDlL">On Computing: The Fourth Great Scientific Domain</a>.</p>





<p>He also helped develop the Common Model of Cognition, which isn't a cognitive architecture itself, but instead a theoretical model meant to generate consensus regarding the minimal components for a human-like mind. The idea is roughly to create a shared language and framework among cognitive architecture researchers, so that whatever cognitive architecture you work on, you have a basis to compare it to, and can communicate effectively among your peers.</p>



<p>All of what I just said, and much of what we discuss, can be found in Paul's memoir, <a href="https://www.dropbox.com/s/5o39z7gj2n1utg3/The%20Search%20for%20Insight%207-31-22.pdf?dl=0" target="_blank" rel="noreferrer noopener">In Search of Insight: My Life as an Architectural Explorer</a>.</p>



<ul>
<li><a href="https://sites.usc.edu/rosenbloom/">Paul's website</a>.</li>



<li>Related papers
<ul>
<li>Working memoir: <a href="https://www.dropbox.com/s/5o39z7gj2n1utg3/The%20Search%20for%20Insight%207-31-22.pdf?dl=0" target="_blank" rel="noreferrer noopener">In Search of Insight: My Life as an Architectural Explorer</a>.</li>



<li>Book: <a href="https://amzn.to/3WojDlL">On Computing: The Fourth Great Scientific Domain</a>.</li>



<li><a href="https://soar.eecs.umich.edu/pubs/Laird_etal_StandardModel_AImag_2018.pdf">A Standard Model of the Mind: Toward a Common Computational Framework across Artificial Intelligence, Cognitive Science, Neuroscience, and Robotics</a>.</li>



<li><a href="https://soar.eecs.umich.edu/pubs/stocco2021connectome.pdf">Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains</a>.</li>



<li><a href="https://ojs.library.carleton.ca/index.php/cmcb/index">Common Model of Cognition Bulletin</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:26 - A career of exploration
7:00 - Allen Newell
14:47 - Relational model and dichotomic maps
24:22 - Cognitive architectures
28:31 - SOAR cognitive architecture
41:14 - Sigma cognitive architecture
43:58 - SOAR vs. Sigma
53:06 - Cognitive architecture community
55:31 - Common model of cognition
1:11:13 - What's missing from the common model
1:17:48 - Brains vs. cognitive architectures
1:21:22 - Mapping the common model onto the brain
1:24:50 - Deep learning
1:30:23 - AGI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/d356add4-0f4c-4023-a9ee-eb9f0ffe3c4f-158-Paul-Rosenbloom-public.mp3" length="91689946"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Paul Rosenbloom is Professor Emeritus of Computer Science at the University of Southern California. In the early 1980s, Paul, along with John Laird and the early AI pioneer Allen Newell, developed one of the earliest and best known cognitive architectures, called SOAR. A cognitive architecture, as Paul defines it, is a model of the fixed structures and processes underlying minds, and in Paul's case the human mind. And SOAR was aimed at generating general intelligence. He doesn't work on SOAR any more, although SOAR is still alive and well in the hands of his old partner John Laird. He did go on to develop another cognitive architecture, called Sigma, and in the intervening years between those projects, among other things Paul stepped back and explored how our various scientific domains are related, and how computing itself should be considered a great scientific domain. That's in his book On Computing: The Fourth Great Scientific Domain.





He also helped develop the Common Model of Cognition, which isn't a cognitive architecture itself, but instead a theoretical model meant to generate consensus regarding the minimal components for a human-like mind. The idea is roughly to create a shared language and framework among cognitive architecture researchers, so that whatever cognitive architecture you work on, you have a basis to compare it to, and can communicate effectively among your peers.



All of what I just said, and much of what we discuss, can be found in Paul's memoir, In Search of Insight: My Life as an Architectural Explorer.




Paul's website.



Related papers

Working memoir: In Search of Insight: My Life as an Architectural Explorer.



Book: On Computing: The Fourth Great Scientific Domain.



A Standard Model of the Mind: Toward a Common Computational Framework across Artificial Intelligence, Cognitive Science, Neuroscience, and Robotics.



Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains.



Common Model of Cognition Bulletin.






0:00 - Intro
3:26 - A career of exploration
7:00 - Allen Newell
14:47 - Relational model and dichotomic maps
24:22 - Cognitive architectures
28:31 - SOAR cognitive architecture
41:14 - Sigma cognitive architecture
43:58 - SOAR vs. Sigma
53:06 - Cognitive architecture community
55:31 - Common model of cognition
1:11:13 - What's missing from the common model
1:17:48 - Brains vs. cognitive architectures
1:21:22 - Mapping the common model onto the brain
1:24:50 - Deep learning
1:30:23 - AGI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:35:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 157 Sarah Robins: Philosophy of Memory]]>
                </title>
                <pubDate>Mon, 02 Jan 2023 20:32:43 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1367447</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-157-sarah-robins-philosophy-of-memory</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Sarah Robins is a philosopher at the University of Kansas, one of a growing handful of philosophers specializing in memory. Much of her work focuses on memory traces, which is roughly the idea that somehow our memories leave a trace in our minds. We discuss memory traces themselves and how they relate to the engram (see <a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a>, and <a href="https://braininspired.co/podcast/127/">BI 127 Tomás Ryan: Memory, Instinct, and Forgetting</a>).</p>



<p>Psychology has divided memories into many categories - the taxonomy of memory. Sarah and I discuss how memory traces may cross-cut those categories, suggesting we may need to re-think our current ontology and taxonomy of memory.</p>



<p>We discuss a couple of challenges to the idea of a stable memory trace in the brain. Neural dynamics is the notion that all our molecules and synapses are constantly changing and being recycled. Memory consolidation refers to the process of transferring our memory traces from an early unstable version to a more stable long-term version in a different part of the brain. Sarah thinks neither challenge poses a real threat to the idea.</p>



<p>We also discuss the impact of optogenetics on the philosophy and neuroscience of memory, the debate about whether memory and imagination are essentially the same thing, whether memory's function is future oriented, and whether we want to build AI with our often faulty human-like memory or with perfect memory.</p>



<ul>
<li><a href="https://www.sarahkrobins.com/">Sarah's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/SarahKRobins">@SarahKRobins</a>.</li>



<li>Related papers:
<ul>
<li>Her Memory chapter, with Felipe de Brigard, in the book <a href="https://amzn.to/3C89n9F">Mind, Cognition, and Neuroscience: A Philosophical Introduction.</a></li>



<li><a href="https://c66264d2-5fb2-4eed-b8b6-7899b2613e1c.filesusr.com/ugd/15e503_442ce387006d4c50b497b940a3eded1f.docx?dn=Memory%20and%20Optogenetic%20Intervention.docx">Memory and Optogenetic Intervention: Separating the engram from the ecphory</a>.</li>



<li><a href="https://www.cambridge.org/core/journals/philosophy-of-science/article/abs/stable-engrams-and-neural-dynamics/9945B7082CDC4EA3A45C9B458F74EB29">Stable Engrams and Neural Dynamics.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:18 - Philosophy of memory
5:10 - Making a move
6:55 - State of philosophy of memory
11:19 - Memory traces or the engram
20:44 - Taxonomy of memory
25:50 - Cognitive ontologies, neuroscience, and psychology
29:39 - Optogenetics
33:48 - Memory traces vs. neural dynamics and consolidation
40:32 - What is the boundary of a memory?
43:00 - Process philosophy and memory
45:07 - Memory vs. imagination
49:40 - Constructivist view of memory and imagination
54:05 - Is memory for the future?
58:00 - Memory errors and intelligence
1:00:42 - Memory and AI
1:06:20 - Creativity and memory errors</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Sarah Robins is a philosopher at the University of Kansas, one of a growing handful of philosophers specializing in memory. Much of her work focuses on memory traces, which is roughly the idea that somehow our memories leave a trace in our minds. We discuss memory traces themselves and how they relate to the engram (see BI 126 Randy Gallistel: Where Is the Engram?, and BI 127 Tomás Ryan: Memory, Instinct, and Forgetting).



Psychology has divided memories into many categories - the taxonomy of memory. Sarah and I discuss how memory traces may cross-cut those categories, suggesting we may need to re-think our current ontology and taxonomy of memory.



We discuss a couple of challenges to the idea of a stable memory trace in the brain. Neural dynamics is the notion that all our molecules and synapses are constantly changing and being recycled. Memory consolidation refers to the process of transferring our memory traces from an early unstable version to a more stable long-term version in a different part of the brain. Sarah thinks neither challenge poses a real threat to the idea.



We also discuss the impact of optogenetics on the philosophy and neuroscience of memory, the debate about whether memory and imagination are essentially the same thing, whether memory's function is future oriented, and whether we want to build AI with our often faulty human-like memory or with perfect memory.




Sarah's website.



Twitter: @SarahKRobins.



Related papers:

Her Memory chapter, with Felipe de Brigard, in the book Mind, Cognition, and Neuroscience: A Philosophical Introduction.



Memory and Optogenetic Intervention: Separating the engram from the ecphory.



Stable Engrams and Neural Dynamics.






0:00 - Intro
4:18 - Philosophy of memory
5:10 - Making a move
6:55 - State of philosophy of memory
11:19 - Memory traces or the engram
20:44 - Taxonomy of memory
25:50 - Cognitive ontologies, neuroscience, and psychology
29:39 - Optogenetics
33:48 - Memory traces vs. neural dynamics and consolidation
40:32 - What is the boundary of a memory?
43:00 - Process philosophy and memory
45:07 - Memory vs. imagination
49:40 - Constructivist view of memory and imagination
54:05 - Is memory for the future?
58:00 - Memory errors and intelligence
1:00:42 - Memory and AI
1:06:20 - Creativity and memory errors]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 157 Sarah Robins: Philosophy of Memory]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Sarah Robins is a philosopher at the University of Kansas, one of a growing handful of philosophers specializing in memory. Much of her work focuses on memory traces, which is roughly the idea that somehow our memories leave a trace in our minds. We discuss memory traces themselves and how they relate to the engram (see <a href="https://braininspired.co/podcast/126/">BI 126 Randy Gallistel: Where Is the Engram?</a>, and <a href="https://braininspired.co/podcast/127/">BI 127 Tomás Ryan: Memory, Instinct, and Forgetting</a>).</p>



<p>Psychology has divided memories into many categories - the taxonomy of memory. Sarah and I discuss how memory traces may cross-cut those categories, suggesting we may need to re-think our current ontology and taxonomy of memory.</p>



<p>We discuss a couple of challenges to the idea of a stable memory trace in the brain. Neural dynamics is the notion that all our molecules and synapses are constantly changing and being recycled. Memory consolidation refers to the process of transferring our memory traces from an early unstable version to a more stable long-term version in a different part of the brain. Sarah thinks neither challenge poses a real threat to the idea.</p>



<p>We also discuss the impact of optogenetics on the philosophy and neuroscience of memory, the debate about whether memory and imagination are essentially the same thing, whether memory's function is future oriented, and whether we want to build AI with our often faulty human-like memory or with perfect memory.</p>



<ul>
<li><a href="https://www.sarahkrobins.com/">Sarah's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/SarahKRobins">@SarahKRobins</a>.</li>



<li>Related papers:
<ul>
<li>Her Memory chapter, with Felipe de Brigard, in the book <a href="https://amzn.to/3C89n9F">Mind, Cognition, and Neuroscience: A Philosophical Introduction.</a></li>



<li><a href="https://c66264d2-5fb2-4eed-b8b6-7899b2613e1c.filesusr.com/ugd/15e503_442ce387006d4c50b497b940a3eded1f.docx?dn=Memory%20and%20Optogenetic%20Intervention.docx">Memory and Optogenetic Intervention: Separating the engram from the ecphory</a>.</li>



<li><a href="https://www.cambridge.org/core/journals/philosophy-of-science/article/abs/stable-engrams-and-neural-dynamics/9945B7082CDC4EA3A45C9B458F74EB29">Stable Engrams and Neural Dynamics.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:18 - Philosophy of memory
5:10 - Making a move
6:55 - State of philosophy of memory
11:19 - Memory traces or the engram
20:44 - Taxonomy of memory
25:50 - Cognitive ontologies, neuroscience, and psychology
29:39 - Optogenetics
33:48 - Memory traces vs. neural dynamics and consolidation
40:32 - What is the boundary of a memory?
43:00 - Process philosophy and memory
45:07 - Memory vs. imagination
49:40 - Constructivist view of memory and imagination
54:05 - Is memory for the future?
58:00 - Memory errors and intelligence
1:00:42 - Memory and AI
1:06:20 - Creativity and memory errors</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/d8210c05-42a4-4b34-b516-d42b85b770c7-157-Sarah-Robins-public.mp3" length="78045950"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Sarah Robins is a philosopher at the University of Kansas, one of a growing handful of philosophers specializing in memory. Much of her work focuses on memory traces, which is roughly the idea that somehow our memories leave a trace in our minds. We discuss memory traces themselves and how they relate to the engram (see BI 126 Randy Gallistel: Where Is the Engram?, and BI 127 Tomás Ryan: Memory, Instinct, and Forgetting).



Psychology has divided memories into many categories - the taxonomy of memory. Sarah and I discuss how memory traces may cross-cut those categories, suggesting we may need to re-think our current ontology and taxonomy of memory.



We discuss a couple of challenges to the idea of a stable memory trace in the brain. Neural dynamics is the notion that all our molecules and synapses are constantly changing and being recycled. Memory consolidation refers to the process of transferring our memory traces from an early unstable version to a more stable long-term version in a different part of the brain. Sarah thinks neither challenge poses a real threat to the idea.



We also discuss the impact of optogenetics on the philosophy and neuroscience of memory, the debate about whether memory and imagination are essentially the same thing, whether memory's function is future oriented, and whether we want to build AI with our often faulty human-like memory or with perfect memory.




Sarah's website.



Twitter: @SarahKRobins.



Related papers:

Her Memory chapter, with Felipe de Brigard, in the book Mind, Cognition, and Neuroscience: A Philosophical Introduction.



Memory and Optogenetic Intervention: Separating the engram from the ecphory.



Stable Engrams and Neural Dynamics.






0:00 - Intro
4:18 - Philosophy of memory
5:10 - Making a move
6:55 - State of philosophy of memory
11:19 - Memory traces or the engram
20:44 - Taxonomy of memory
25:50 - Cognitive ontologies, neuroscience, and psychology
29:39 - Optogenetics
33:48 - Memory traces vs. neural dynamics and consolidation
40:32 - What is the boundary of a memory?
43:00 - Process philosophy and memory
45:07 - Memory vs. imagination
49:40 - Constructivist view of memory and imagination
54:05 - Is memory for the future?
58:00 - Memory errors and intelligence
1:00:42 - Memory and AI
1:06:20 - Creativity and memory errors]]>
                </itunes:summary>
                                                                            <itunes:duration>01:20:59</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 156 Mariam Aly: Memory, Attention, and Perception]]>
                </title>
                <pubDate>Fri, 23 Dec 2022 00:37:17 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1357749</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-156-mariam-aly-memory-attention-and-perception</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Mariam Aly runs the Aly lab at Columbia University, where she studies the interaction of memory, attention, and perception in brain regions like the hippocampus. The short story is that memory affects our perceptions, attention affects our memories, memories affect our attention, and these effects have signatures in neural activity measurements in our hippocampus and other brain areas. We discuss her experiments testing the nature of those interactions. We also discuss a particularly difficult stretch in Mariam's graduate school years, and how she now prioritizes her mental health.</p>



<ul>
<li><a href="https://www.alylab.org/">Aly Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/mariam_s_aly">@mariam_s_aly</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.alylab.org/_files/ugd/1d2439_764d2d599b94432090070316796bb6f8.pdf">Attention promotes episodic encoding by stabilizing hippocampal representations</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_f834876a78cc441dbcd23831023dcb77.pdf">The medial temporal lobe is critical for spatial relational perception</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_f3e97b69b76b4b0fbb19d8845192d7d4.pdf">Cholinergic modulation of hippocampally mediated attention and perception</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_7d24b5618ddc469b883297c8affdbe4f.pdf">Preparation for upcoming attentional states in the hippocampus and medial prefrontal cortex</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_f6d1f29d60f64e92b8c7a36109de3e54.pdf">How hippocampal memory shapes, and is shaped by, attention</a>.</li>



<li><a href="https://psyarxiv.com/j32bn">Attentional fluctuations and the temporal organization of memory</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:50 - Mariam's background
9:32 - Hippocampus history and current science
12:34 - hippocampus and perception
13:42 - Relational information
18:30 - How much memory is explicit?
22:32 - How attention affects hippocampus
32:40 - fMRI levels vs. stability
39:04 - How is hippocampus necessary for attention
57:00 - How much does attention affect memory?
1:02:24 - How memory affects attention
1:06:50 - Attention and memory relation big picture
1:07:42 - Current state of memory and attention
1:12:12 - Modularity
1:17:52 - Practical advice to improve attention/memory
1:21:22 - Mariam's challenges</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Mariam Aly runs the Aly lab at Columbia University, where she studies the interaction of memory, attention, and perception in brain regions like the hippocampus. The short story is that memory affects our perceptions, attention affects our memories, memories affect our attention, and these effects have signatures in neural activity measurements in our hippocampus and other brain areas. We discuss her experiments testing the nature of those interactions. We also discuss a particularly difficult stretch in Mariam's graduate school years, and how she now prioritizes her mental health.




Aly Lab.



Twitter: @mariam_s_aly.



Related papers

Attention promotes episodic encoding by stabilizing hippocampal representations.



The medial temporal lobe is critical for spatial relational perception.



Cholinergic modulation of hippocampally mediated attention and perception.



Preparation for upcoming attentional states in the hippocampus and medial prefrontal cortex.



How hippocampal memory shapes, and is shaped by, attention.



Attentional fluctuations and the temporal organization of memory.






0:00 - Intro
3:50 - Mariam's background
9:32 - Hippocampus history and current science
12:34 - hippocampus and perception
13:42 - Relational information
18:30 - How much memory is explicit?
22:32 - How attention affects hippocampus
32:40 - fMRI levels vs. stability
39:04 - How is hippocampus necessary for attention
57:00 - How much does attention affect memory?
1:02:24 - How memory affects attention
1:06:50 - Attention and memory relation big picture
1:07:42 - Current state of memory and attention
1:12:12 - Modularity
1:17:52 - Practical advice to improve attention/memory
1:21:22 - Mariam's challenges]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 156 Mariam Aly: Memory, Attention, and Perception]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>











<p>Mariam Aly runs the Aly lab at Columbia University, where she studies the interaction of memory, attention, and perception in brain regions like the hippocampus. The short story is that memory affects our perceptions, attention affects our memories, memories affect our attention, and these effects have signatures in neural activity measurements in our hippocampus and other brain areas. We discuss her experiments testing the nature of those interactions. We also discuss a particularly difficult stretch in Mariam's graduate school years, and how she now prioritizes her mental health.</p>



<ul>
<li><a href="https://www.alylab.org/">Aly Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/mariam_s_aly">@mariam_s_aly</a>.</li>



<li>Related papers
<ul>
<li><a href="https://www.alylab.org/_files/ugd/1d2439_764d2d599b94432090070316796bb6f8.pdf">Attention promotes episodic encoding by stabilizing hippocampal representations</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_f834876a78cc441dbcd23831023dcb77.pdf">The medial temporal lobe is critical for spatial relational perception</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_f3e97b69b76b4b0fbb19d8845192d7d4.pdf">Cholinergic modulation of hippocampally mediated attention and perception</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_7d24b5618ddc469b883297c8affdbe4f.pdf">Preparation for upcoming attentional states in the hippocampus and medial prefrontal cortex</a>.</li>



<li><a href="https://www.alylab.org/_files/ugd/1d2439_f6d1f29d60f64e92b8c7a36109de3e54.pdf">How hippocampal memory shapes, and is shaped by, attention</a>.</li>



<li><a href="https://psyarxiv.com/j32bn">Attentional fluctuations and the temporal organization of memory</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:50 - Mariam's background
9:32 - Hippocampus history and current science
12:34 - hippocampus and perception
13:42 - Relational information
18:30 - How much memory is explicit?
22:32 - How attention affects hippocampus
32:40 - fMRI levels vs. stability
39:04 - How is hippocampus necessary for attention
57:00 - How much does attention affect memory?
1:02:24 - How memory affects attention
1:06:50 - Attention and memory relation big picture
1:07:42 - Current state of memory and attention
1:12:12 - Modularity
1:17:52 - Practical advice to improve attention/memory
1:21:22 - Mariam's challenges</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2dad2682-5ae2-43df-8826-d37da22b45ba-156-Mariam-Aly-public.mp3" length="97028589"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience











Mariam Aly runs the Aly lab at Columbia University, where she studies the interaction of memory, attention, and perception in brain regions like the hippocampus. The short story is that memory affects our perceptions, attention affects our memories, memories affect our attention, and these effects have signatures in neural activity measurements in our hippocampus and other brain areas. We discuss her experiments testing the nature of those interactions. We also discuss a particularly difficult stretch in Mariam's graduate school years, and how she now prioritizes her mental health.




Aly Lab.



Twitter: @mariam_s_aly.



Related papers

Attention promotes episodic encoding by stabilizing hippocampal representations.



The medial temporal lobe is critical for spatial relational perception.



Cholinergic modulation of hippocampally mediated attention and perception.



Preparation for upcoming attentional states in the hippocampus and medial prefrontal cortex.



How hippocampal memory shapes, and is shaped by, attention.



Attentional fluctuations and the temporal organization of memory.






0:00 - Intro
3:50 - Mariam's background
9:32 - Hippocampus history and current science
12:34 - hippocampus and perception
13:42 - Relational information
18:30 - How much memory is explicit?
22:32 - How attention affects hippocampus
32:40 - fMRI levels vs. stability
39:04 - How is hippocampus necessary for attention
57:00 - How much does attention affect memory?
1:02:24 - How memory affects attention
1:06:50 - Attention and memory relation big picture
1:07:42 - Current state of memory and attention
1:12:12 - Modularity
1:17:52 - Practical advice to improve attention/memory
1:21:22 - Mariam's challenges]]>
                </itunes:summary>
                                                                            <itunes:duration>01:40:45</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 155 Luiz Pessoa: The Entangled Brain]]>
                </title>
                <pubDate>Sat, 10 Dec 2022 06:46:08 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1346717</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-155-luiz-pessoa-the-entangled-brain</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>















<p>Luiz Pessoa runs his <a href="https://lce.umd.edu/">Laboratory of Cognition and Emotion</a> at the University of Maryland, College Park, where he studies how emotion and cognition interact. On this episode, we discuss many of the topics from his latest book, <a href="https://amzn.to/3VKZPcm">The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together</a>, which is aimed at a general audience. The book argues we need to re-think how to study the brain. Traditionally, cognitive functions of the brain have been studied in a modular fashion: area X <em>does</em> function Y. However, modern research has revealed the brain is highly complex and carries out cognitive functions in a much more interactive and integrative fashion: a given cognitive function results from many areas and circuits temporarily coalescing (for similar ideas, see also <a href="https://braininspired.co/podcast/152/">BI 152 Michael L. Anderson: After Phrenology: Neural Reuse</a>). Luiz and I discuss the implications of studying the brain from a complex systems perspective, why we need to go beyond thinking about anatomy and instead think about functional organization, some of the brain's principles of organization, and a lot more.</p>





<ul>
<li><a href="https://lce.umd.edu/">Laboratory of Cognition and Emotion</a>.</li>



<li>Twitter: <a href="https://twitter.com/PessoaBrain">@PessoaBrain</a>.</li>



<li>Book: <a href="https://amzn.to/3VKZPcm">The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together</a></li>
</ul>



<p>0:00 - Intro
2:47 - The Entangled Brain
16:24 - How to think about complex systems
23:41 - Modularity thinking
28:16 - How to train one's mind to think complex
33:26 - Problem or principle?
44:22 - Complex behaviors
47:06 - Organization vs. structure
51:09 - Principles of organization: Massive Combinatorial Anatomical Connectivity
55:15 - Principles of organization: High Distributed Functional Connectivity
1:00:50 - Principles of organization: Networks as Functional Units
1:06:15 - Principles of Organization: Interactions via Cortical-Subcortical Loops
1:08:53 - Open and closed loops
1:16:43 - Principles of organization: Connectivity with the Body
1:21:28 - Consciousness
1:24:53 - Emotions
1:32:49 - Emotions and AI
1:39:47 - Emotion as a concept
1:43:25 - Complexity and functional organization in AI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience















Luiz Pessoa runs his Laboratory of Cognition and Emotion at the University of Maryland, College Park, where he studies how emotion and cognition interact. On this episode, we discuss many of the topics from his latest book, The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together, which is aimed at a general audience. The book argues we need to re-think how to study the brain. Traditionally, cognitive functions of the brain have been studied in a modular fashion: area X does function Y. However, modern research has revealed the brain is highly complex and carries out cognitive functions in a much more interactive and integrative fashion: a given cognitive function results from many areas and circuits temporarily coalescing (for similar ideas, see also BI 152 Michael L. Anderson: After Phrenology: Neural Reuse). Luiz and I discuss the implications of studying the brain from a complex systems perspective, why we need to go beyond thinking about anatomy and instead think about functional organization, some of the brain's principles of organization, and a lot more.






Laboratory of Cognition and Emotion.



Twitter: @PessoaBrain.



Book: The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together




0:00 - Intro
2:47 - The Entangled Brain
16:24 - How to think about complex systems
23:41 - Modularity thinking
28:16 - How to train one's mind to think complex
33:26 - Problem or principle?
44:22 - Complex behaviors
47:06 - Organization vs. structure
51:09 - Principles of organization: Massive Combinatorial Anatomical Connectivity
55:15 - Principles of organization: High Distributed Functional Connectivity
1:00:50 - Principles of organization: Networks as Functional Units
1:06:15 - Principles of Organization: Interactions via Cortical-Subcortical Loops
1:08:53 - Open and closed loops
1:16:43 - Principles of organization: Connectivity with the Body
1:21:28 - Consciousness
1:24:53 - Emotions
1:32:49 - Emotions and AI
1:39:47 - Emotion as a concept
1:43:25 - Complexity and functional organization in AI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 155 Luiz Pessoa: The Entangled Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>















<p>Luiz Pessoa runs his <a href="https://lce.umd.edu/">Laboratory of Cognition and Emotion</a> at the University of Maryland, College Park, where he studies how emotion and cognition interact. On this episode, we discuss many of the topics from his latest book, <a href="https://amzn.to/3VKZPcm">The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together</a>, which is aimed at a general audience. The book argues we need to re-think how to study the brain. Traditionally, cognitive functions of the brain have been studied in a modular fashion: area X <em>does</em> function Y. However, modern research has revealed the brain is highly complex and carries out cognitive functions in a much more interactive and integrative fashion: a given cognitive function results from many areas and circuits temporarily coalescing (for similar ideas, see also <a href="https://braininspired.co/podcast/152/">BI 152 Michael L. Anderson: After Phrenology: Neural Reuse</a>). Luiz and I discuss the implications of studying the brain from a complex systems perspective, why we need to go beyond thinking about anatomy and instead think about functional organization, some of the brain's principles of organization, and a lot more.</p>





<ul>
<li><a href="https://lce.umd.edu/">Laboratory of Cognition and Emotion</a>.</li>



<li>Twitter: <a href="https://twitter.com/PessoaBrain">@PessoaBrain</a>.</li>



<li>Book: <a href="https://amzn.to/3VKZPcm">The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together</a></li>
</ul>



<p>0:00 - Intro
2:47 - The Entangled Brain
16:24 - How to think about complex systems
23:41 - Modularity thinking
28:16 - How to train one's mind to think complex
33:26 - Problem or principle?
44:22 - Complex behaviors
47:06 - Organization vs. structure
51:09 - Principles of organization: Massive Combinatorial Anatomical Connectivity
55:15 - Principles of organization: High Distributed Functional Connectivity
1:00:50 - Principles of organization: Networks as Functional Units
1:06:15 - Principles of Organization: Interactions via Cortical-Subcortical Loops
1:08:53 - Open and closed loops
1:16:43 - Principles of organization: Connectivity with the Body
1:21:28 - Consciousness
1:24:53 - Emotions
1:32:49 - Emotions and AI
1:39:47 - Emotion as a concept
1:43:25 - Complexity and functional organization in AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/bb634884-aaf8-465e-b5cf-9510a8b90351-155-Luiz-Pessoa-public.mp3" length="110167230"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my free video series about what's missing in AI and Neuroscience















Luiz Pessoa runs his Laboratory of Cognition and Emotion at the University of Maryland, College Park, where he studies how emotion and cognition interact. On this episode, we discuss many of the topics from his latest book, The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together, which is aimed at a general audience. The book argues we need to re-think how to study the brain. Traditionally, cognitive functions of the brain have been studied in a modular fashion: area X does function Y. However, modern research has revealed the brain is highly complex and carries out cognitive functions in a much more interactive and integrative fashion: a given cognitive function results from many areas and circuits temporarily coalescing (for similar ideas, see also BI 152 Michael L. Anderson: After Phrenology: Neural Reuse). Luiz and I discuss the implications of studying the brain from a complex systems perspective, why we need to go beyond thinking about anatomy and instead think about functional organization, some of the brain's principles of organization, and a lot more.






Laboratory of Cognition and Emotion.



Twitter: @PessoaBrain.



Book: The Entangled Brain: How Perception, Cognition, and Emotion Are Woven Together




0:00 - Intro
2:47 - The Entangled Brain
16:24 - How to think about complex systems
23:41 - Modularity thinking
28:16 - How to train one's mind to think complex
33:26 - Problem or principle?
44:22 - Complex behaviors
47:06 - Organization vs. structure
51:09 - Principles of organization: Massive Combinatorial Anatomical Connectivity
55:15 - Principles of organization: High Distributed Functional Connectivity
1:00:50 - Principles of organization: Networks as Functional Units
1:06:15 - Principles of Organization: Interactions via Cortical-Subcortical Loops
1:08:53 - Open and closed loops
1:16:43 - Principles of organization: Connectivity with the Body
1:21:28 - Consciousness
1:24:53 - Emotions
1:32:49 - Emotions and AI
1:39:47 - Emotion as a concept
1:43:25 - Complexity and functional organization in AI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:54:26</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 154 Anne Collins: Learning with Working Memory]]>
                </title>
                <pubDate>Tue, 29 Nov 2022 02:45:04 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1337514</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-154-anne-collins-learning-with-working-memory</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Anne Collins runs her <a href="https://ccn.berkeley.edu/">Computational Cognitive Neuroscience Lab</a> at the University of California, Berkeley. One of the things she's been working on for years is how our working memory plays a role in learning as well, and specifically how working memory and reinforcement learning interact to affect how we learn, depending on the nature of what we're trying to learn. We discuss that interaction specifically. We also discuss more broadly how segregated and how overlapping and interacting our cognitive functions are, what that implies about our natural tendency to think in dichotomies - like MF vs MB-RL, system-1 vs system-2, etc., and we dive into plenty of other subjects, like how to possibly incorporate these ideas into AI.</p>



<ul>
<li><a href="https://ccn.berkeley.edu/">Computational Cognitive Neuroscience Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/ccnlab">@ccnlab</a> or <a href="https://twitter.com/Anne_on_Tw">@Anne_On_Tw</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://ccn.berkeley.edu/pdfs/papers/YooCollins2022JoCN_WMRL.pdf">How Working Memory and Reinforcement Learning Are Intertwined: A Cognitive, Neural, and Computational Perspective</a>. </li>



<li><a href="https://ccn.berkeley.edu/pdfs/papers/MBMF_NatureReviews_R2.pdf">Beyond simple dichotomies in reinforcement learning</a>.</li>



<li><a href="https://ccn.berkeley.edu/pdfs/papers/EFshapesRL2020_R1.pdf">The Role of Executive Function in Shaping Reinforcement Learning</a>.</li>



<li><a href="https://ccn.berkeley.edu/pdfs/papers/EcksteinWilbrechtCollins_2021.pdf">What do reinforcement learning models measure? Interpreting model parameters in cognition and neuroscience</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:25 - Dimensionality of learning
11:19 - Modularity of function and computations
16:51 - Is working memory a thing?
19:33 - Model-free model-based dichotomy
30:40 - Working memory and RL
44:43 - How working memory and RL interact
50:50 - Working memory and attention
59:37 - Computations vs. implementations
1:03:25 - Interpreting results
1:08:00 - Working memory and AI</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Anne Collins runs her Computational Cognitive Neuroscience Lab at the University of California, Berkeley. One of the things she's been working on for years is how our working memory plays a role in learning as well, and specifically how working memory and reinforcement learning interact to affect how we learn, depending on the nature of what we're trying to learn. We discuss that interaction specifically. We also discuss more broadly how segregated and how overlapping and interacting our cognitive functions are, what that implies about our natural tendency to think in dichotomies - like MF vs MB-RL, system-1 vs system-2, etc., and we dive into plenty of other subjects, like how to possibly incorporate these ideas into AI.




Computational Cognitive Neuroscience Lab.



Twitter: @ccnlab or @Anne_On_Tw.



Related papers:

How Working Memory and Reinforcement Learning Are Intertwined: A Cognitive, Neural, and Computational Perspective. 



Beyond simple dichotomies in reinforcement learning.



The Role of Executive Function in Shaping Reinforcement Learning.



What do reinforcement learning models measure? Interpreting model parameters in cognition and neuroscience.






0:00 - Intro
5:25 - Dimensionality of learning
11:19 - Modularity of function and computations
16:51 - Is working memory a thing?
19:33 - Model-free model-based dichotomy
30:40 - Working memory and RL
44:43 - How working memory and RL interact
50:50 - Working memory and attention
59:37 - Computations vs. implementations
1:03:25 - Interpreting results
1:08:00 - Working memory and AI]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 154 Anne Collins: Learning with Working Memory]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Anne Collins runs her <a href="https://ccn.berkeley.edu/">Computational Cognitive Neuroscience Lab</a> at the University of California, Berkeley. One of the things she's been working on for years is how our working memory plays a role in learning as well, and specifically how working memory and reinforcement learning interact to affect how we learn, depending on the nature of what we're trying to learn. We discuss that interaction specifically. We also discuss more broadly how segregated and how overlapping and interacting our cognitive functions are, what that implies about our natural tendency to think in dichotomies - like MF vs MB-RL, system-1 vs system-2, etc., and we dive into plenty of other subjects, like how to possibly incorporate these ideas into AI.</p>



<ul>
<li><a href="https://ccn.berkeley.edu/">Computational Cognitive Neuroscience Lab</a>.</li>



<li>Twitter: <a href="https://twitter.com/ccnlab">@ccnlab</a> or <a href="https://twitter.com/Anne_on_Tw">@Anne_On_Tw</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://ccn.berkeley.edu/pdfs/papers/YooCollins2022JoCN_WMRL.pdf">How Working Memory and Reinforcement Learning Are Intertwined: A Cognitive, Neural, and Computational Perspective</a>. </li>



<li><a href="https://ccn.berkeley.edu/pdfs/papers/MBMF_NatureReviews_R2.pdf">Beyond simple dichotomies in reinforcement learning</a>.</li>



<li><a href="https://ccn.berkeley.edu/pdfs/papers/EFshapesRL2020_R1.pdf">The Role of Executive Function in Shaping Reinforcement Learning</a>.</li>



<li><a href="https://ccn.berkeley.edu/pdfs/papers/EcksteinWilbrechtCollins_2021.pdf">What do reinforcement learning models measure? Interpreting model parameters in cognition and neuroscience</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
5:25 - Dimensionality of learning
11:19 - Modularity of function and computations
16:51 - Is working memory a thing?
19:33 - Model-free model-based dichotomy
30:40 - Working memory and RL
44:43 - How working memory and RL interact
50:50 - Working memory and attention
59:37 - Computations vs. implementations
1:03:25 - Interpreting results
1:08:00 - Working memory and AI</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1f9b30a6-3d7b-4d85-b9d7-4d4e5e35aa9a-154-Anne-Collins-public.mp3" length="79451920"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Anne Collins runs her Computational Cognitive Neuroscience Lab at the University of California, Berkeley. One of the things she's been working on for years is how our working memory plays a role in learning as well, and specifically how working memory and reinforcement learning interact to affect how we learn, depending on the nature of what we're trying to learn. We discuss that interaction specifically. We also discuss more broadly how segregated and how overlapping and interacting our cognitive functions are, what that implies about our natural tendency to think in dichotomies - like MF vs MB-RL, system-1 vs system-2, etc., and we dive into plenty of other subjects, like how to possibly incorporate these ideas into AI.




Computational Cognitive Neuroscience Lab.



Twitter: @ccnlab or @Anne_On_Tw.



Related papers:

How Working Memory and Reinforcement Learning Are Intertwined: A Cognitive, Neural, and Computational Perspective. 



Beyond simple dichotomies in reinforcement learning.



The Role of Executive Function in Shaping Reinforcement Learning.



What do reinforcement learning models measure? Interpreting model parameters in cognition and neuroscience.






0:00 - Intro
5:25 - Dimensionality of learning
11:19 - Modularity of function and computations
16:51 - Is working memory a thing?
19:33 - Model-free model-based dichotomy
30:40 - Working memory and RL
44:43 - How working memory and RL interact
50:50 - Working memory and attention
59:37 - Computations vs. implementations
1:03:25 - Interpreting results
1:08:00 - Working memory and AI]]>
                </itunes:summary>
                                                                            <itunes:duration>01:22:27</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 153 Carolyn Dicey-Jennings: Attention and the Self]]>
                </title>
                <pubDate>Fri, 18 Nov 2022 15:39:58 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1325924</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-153-carolyn-jennings-attention-and-the-self</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Carolyn Dicey Jennings is a philosopher and a cognitive scientist at University of California, Merced. In her book <a href="https://amzn.to/3Temj48">The Attending Mind</a>, she lays out an attempt to unify the concept of attention. Carolyn defines attention roughly as the mental prioritization of some stuff over other stuff based on our collective interests. And one of her main claims is that attention is evidence of a real, emergent self or subject, that can't be reduced to microscopic brain activity. She does connect attention to more macroscopic brain activity, suggesting slow longer-range oscillations in our brains can alter or entrain the activity of more local neural activity, and this is a candidate for mental causation. We unpack that more in our discussion, and how Carolyn situates attention among other cognitive functions, like consciousness, action, and perception.</p>





<ul>
<li><a href="http://faculty.ucmerced.edu/cjennings3/#">Carolyn's website</a>.</li>



<li>Books:
<ul>
<li><a href="https://amzn.to/3Temj48">The Attending Mind</a>.</li>
</ul>
</li>



<li>Aeon article:
<ul>
<li><a href="https://aeon.co/essays/what-is-the-self-if-not-that-which-pays-attention">I Attend, Therefore I Am</a>.</li>
</ul>
</li>



<li>Related papers
<ul>
<li><a href="http://faculty.ucmerced.edu/cjennings3/Synthese.pdf">The Subject of Attention</a>.</li>



<li><a href="http://faculty.ucmerced.edu/cjennings3/ConsciousnessMind.pdf">Consciousness and Mind</a>.</li>



<li><a href="https://philpapers.org/archive/JENPRA-2.pdf">Practical Realism about the Self</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
12:15 - Reconceptualizing attention
16:07 - Types of attention
19:02 - Predictive processing and attention
23:19 - Consciousness, identity, and self
30:39 - Attention and the brain
35:47 - Integrated information theory
42:05 - Neural attention
52:08 - Decoupling oscillations from spikes
57:16 - Selves in other organisms
1:00:42 - AI and the self
1:04:43 - Attention, consciousness, conscious perception
1:08:36 - Meaning and attention
1:11:12 - Conscious entrainment
1:19:57 - Is attention a switch or knob?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Carolyn Dicey Jennings is a philosopher and a cognitive scientist at University of California, Merced. In her book The Attending Mind, she lays out an attempt to unify the concept of attention. Carolyn defines attention roughly as the mental prioritization of some stuff over other stuff based on our collective interests. And one of her main claims is that attention is evidence of a real, emergent self or subject, that can't be reduced to microscopic brain activity. She does connect attention to more macroscopic brain activity, suggesting slow longer-range oscillations in our brains can alter or entrain the activity of more local neural activity, and this is a candidate for mental causation. We unpack that more in our discussion, and how Carolyn situates attention among other cognitive functions, like consciousness, action, and perception.






Carolyn's website.



Books:

The Attending Mind.





Aeon article:

I Attend, Therefore I Am.





Related papers

The Subject of Attention.



Consciousness and Mind.



Practical Realism about the Self.






0:00 - Intro
12:15 - Reconceptualizing attention
16:07 - Types of attention
19:02 - Predictive processing and attention
23:19 - Consciousness, identity, and self
30:39 - Attention and the brain
35:47 - Integrated information theory
42:05 - Neural attention
52:08 - Decoupling oscillations from spikes
57:16 - Selves in other organisms
1:00:42 - AI and the self
1:04:43 - Attention, consciousness, conscious perception
1:08:36 - Meaning and attention
1:11:12 - Conscious entrainment
1:19:57 - Is attention a switch or knob?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 153 Carolyn Dicey-Jennings: Attention and the Self]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Carolyn Dicey Jennings is a philosopher and a cognitive scientist at University of California, Merced. In her book <a href="https://amzn.to/3Temj48">The Attending Mind</a>, she lays out an attempt to unify the concept of attention. Carolyn defines attention roughly as the mental prioritization of some stuff over other stuff based on our collective interests. And one of her main claims is that attention is evidence of a real, emergent self or subject, that can't be reduced to microscopic brain activity. She does connect attention to more macroscopic brain activity, suggesting slow longer-range oscillations in our brains can alter or entrain the activity of more local neural activity, and this is a candidate for mental causation. We unpack that more in our discussion, and how Carolyn situates attention among other cognitive functions, like consciousness, action, and perception.</p>





<ul>
<li><a href="http://faculty.ucmerced.edu/cjennings3/#">Carolyn's website</a>.</li>



<li>Books:
<ul>
<li><a href="https://amzn.to/3Temj48">The Attending Mind</a>.</li>
</ul>
</li>



<li>Aeon article:
<ul>
<li><a href="https://aeon.co/essays/what-is-the-self-if-not-that-which-pays-attention">I Attend, Therefore I Am</a>.</li>
</ul>
</li>



<li>Related papers
<ul>
<li><a href="http://faculty.ucmerced.edu/cjennings3/Synthese.pdf">The Subject of Attention</a>.</li>



<li><a href="http://faculty.ucmerced.edu/cjennings3/ConsciousnessMind.pdf">Consciousness and Mind</a>.</li>



<li><a href="https://philpapers.org/archive/JENPRA-2.pdf">Practical Realism about the Self</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
12:15 - Reconceptualizing attention
16:07 - Types of attention
19:02 - Predictive processing and attention
23:19 - Consciousness, identity, and self
30:39 - Attention and the brain
35:47 - Integrated information theory
42:05 - Neural attention
52:08 - Decoupling oscillations from spikes
57:16 - Selves in other organisms
1:00:42 - AI and the self
1:04:43 - Attention, consciousness, conscious perception
1:08:36 - Meaning and attention
1:11:12 - Conscious entrainment
1:19:57 - Is attention a switch or knob?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/458f4c7e-09c7-4b88-b29f-63ad8a8ed13c-153-Carolyn-Jennings-public.mp3" length="82384918"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Carolyn Dicey Jennings is a philosopher and a cognitive scientist at University of California, Merced. In her book The Attending Mind, she lays out an attempt to unify the concept of attention. Carolyn defines attention roughly as the mental prioritization of some stuff over other stuff based on our collective interests. And one of her main claims is that attention is evidence of a real, emergent self or subject, that can't be reduced to microscopic brain activity. She does connect attention to more macroscopic brain activity, suggesting slow longer-range oscillations in our brains can alter or entrain the activity of more local neural activity, and this is a candidate for mental causation. We unpack that more in our discussion, and how Carolyn situates attention among other cognitive functions, like consciousness, action, and perception.






Carolyn's website.



Books:

The Attending Mind.





Aeon article:

I Attend, Therefore I Am.





Related papers

The Subject of Attention.



Consciousness and Mind.



Practical Realism about the Self.






0:00 - Intro
12:15 - Reconceptualizing attention
16:07 - Types of attention
19:02 - Predictive processing and attention
23:19 - Consciousness, identity, and self
30:39 - Attention and the brain
35:47 - Integrated information theory
42:05 - Neural attention
52:08 - Decoupling oscillations from spikes
57:16 - Selves in other organisms
1:00:42 - AI and the self
1:04:43 - Attention, consciousness, conscious perception
1:08:36 - Meaning and attention
1:11:12 - Conscious entrainment
1:19:57 - Is attention a switch or knob?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:25:30</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 152 Michael L. Anderson: After Phrenology: Neural Reuse]]>
                </title>
                <pubDate>Tue, 08 Nov 2022 16:04:39 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1314489</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-152-michael-l-anderson-after-phrenology-neural-reuse</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Michael L. Anderson is a professor at the Rotman Institute of Philosophy, at Western University. His book, <a href="https://amzn.to/3BYCs8m">After Phrenology: Neural Reuse and the Interactive Brain</a>, calls for a re-conceptualization of how we understand and study brains and minds. Neural reuse is the phenomenon that any given brain area is active for multiple cognitive functions, and partners with different sets of brain areas to carry out different cognitive functions. We discuss the implications for this, and other topics in Michael's research and the book, like evolution, embodied cognition, and Gibsonian perception. Michael also fields guest questions from <a href="https://braininspired.co/podcast/77/">John Krakauer</a> and <a href="https://braininspired.co/podcast/136/">Alex Gomez-Marin</a>, about representations and metaphysics, respectively.</p>





<ul>
<li><a href="https://www.rotman.uwo.ca/portfolio-items/anderson-michael-l/">Michael's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/mljanderson">@mljanderson</a>.</li>



<li>Book:
<ul>
<li><a href="https://amzn.to/3BYCs8m">After Phrenology: Neural Reuse and the Interactive Brain</a>.</li>
</ul>
</li>



<li>Related papers
<ul>
<li><a href="https://www.academia.edu/en/278902/Neural_reuse_a_fundamental_organizational_principle_of_the_brain">Neural reuse: a fundamental organizational principle of the brain.</a></li>



<li><a href="http://philsci-archive.pitt.edu/20003/1/AndersonChampion2021.pdf">Some dilemmas for an account of neural representation: A reply to Poldrack.</a></li>



<li><a href="http://philsci-archive.pitt.edu/20426/1/Davies-Barton%20et%20al.%20(2022).pdf">Debt-free intelligence: Ecological information in minds and machines</a></li>



<li><a href="https://europepmc.org/backend/ptpmcrender.fcgi?accid=PMC3756684&amp;blobtype=pdf">Describing functional diversity of brain regions and brain networks</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:02 - After Phrenology
13:18 - Typical neuroscience experiment
16:29 - Neural reuse
18:37 - 4E cognition and representations
22:48 - John Krakauer question
27:38 - Gibsonian perception
36:17 - Autoencoders without representations
49:22 - Pluralism
52:42 - Alex Gomez-Marin question - metaphysics
1:01:26 - Stimulus-response historical neuroscience
1:10:59 - After Phrenology influence
1:19:24 - Origins of neural reuse
1:35:25 - The way forward</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Michael L. Anderson is a professor at the Rotman Institute of Philosophy, at Western University. His book, After Phrenology: Neural Reuse and the Interactive Brain, calls for a re-conceptualization of how we understand and study brains and minds. Neural reuse is the phenomenon that any given brain area is active for multiple cognitive functions, and partners with different sets of brain areas to carry out different cognitive functions. We discuss the implications for this, and other topics in Michael's research and the book, like evolution, embodied cognition, and Gibsonian perception. Michael also fields guest questions from John Krakauer and Alex Gomez-Marin, about representations and metaphysics, respectively.






Michael's website.



Twitter: @mljanderson.



Book:

After Phrenology: Neural Reuse and the Interactive Brain.





Related papers

Neural reuse: a fundamental organizational principle of the brain.



Some dilemmas for an account of neural representation: A reply to Poldrack.



Debt-free intelligence: Ecological information in minds and machines



Describing functional diversity of brain regions and brain networks.






0:00 - Intro
3:02 - After Phrenology
13:18 - Typical neuroscience experiment
16:29 - Neural reuse
18:37 - 4E cognition and representations
22:48 - John Krakauer question
27:38 - Gibsonian perception
36:17 - Autoencoders without representations
49:22 - Pluralism
52:42 - Alex Gomez-Marin question - metaphysics
1:01:26 - Stimulus-response historical neuroscience
1:10:59 - After Phrenology influence
1:19:24 - Origins of neural reuse
1:35:25 - The way forward]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 152 Michael L. Anderson: After Phrenology: Neural Reuse]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>







<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Michael L. Anderson is a professor at the Rotman Institute of Philosophy, at Western University. His book, <a href="https://amzn.to/3BYCs8m">After Phrenology: Neural Reuse and the Interactive Brain</a>, calls for a re-conceptualization of how we understand and study brains and minds. Neural reuse is the phenomenon that any given brain area is active for multiple cognitive functions, and partners with different sets of brain areas to carry out different cognitive functions. We discuss the implications for this, and other topics in Michael's research and the book, like evolution, embodied cognition, and Gibsonian perception. Michael also fields guest questions from <a href="https://braininspired.co/podcast/77/">John Krakauer</a> and <a href="https://braininspired.co/podcast/136/">Alex Gomez-Marin</a>, about representations and metaphysics, respectively.</p>





<ul>
<li><a href="https://www.rotman.uwo.ca/portfolio-items/anderson-michael-l/">Michael's website</a>.</li>



<li>Twitter: <a href="https://twitter.com/mljanderson">@mljanderson</a>.</li>



<li>Book:
<ul>
<li><a href="https://amzn.to/3BYCs8m">After Phrenology: Neural Reuse and the Interactive Brain</a>.</li>
</ul>
</li>



<li>Related papers
<ul>
<li><a href="https://www.academia.edu/en/278902/Neural_reuse_a_fundamental_organizational_principle_of_the_brain">Neural reuse: a fundamental organizational principle of the brain.</a></li>



<li><a href="http://philsci-archive.pitt.edu/20003/1/AndersonChampion2021.pdf">Some dilemmas for an account of neural representation: A reply to Poldrack.</a></li>



<li><a href="http://philsci-archive.pitt.edu/20426/1/Davies-Barton%20et%20al.%20(2022).pdf">Debt-free intelligence: Ecological information in minds and machines</a></li>



<li><a href="https://europepmc.org/backend/ptpmcrender.fcgi?accid=PMC3756684&amp;blobtype=pdf">Describing functional diversity of brain regions and brain networks</a>.</li>
</ul>
</li>
</ul>



<p>0:00 - Intro
3:02 - After Phrenology
13:18 - Typical neuroscience experiment
16:29 - Neural reuse
18:37 - 4E cognition and representations
22:48 - John Krakauer question
27:38 - Gibsonian perception
36:17 - Autoencoders without representations
49:22 - Pluralism
52:42 - Alex Gomez-Marin question - metaphysics
1:01:26 - Stimulus-response historical neuroscience
1:10:59 - After Phrenology influence
1:19:24 - Origins of neural reuse
1:35:25 - The way forward</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/b332dc03-463c-4d68-859d-4834a20e3dc8-152-Michael-Anderson-public.mp3" length="101281344"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.









Michael L. Anderson is a professor at the Rotman Institute of Philosophy, at Western University. His book, After Phrenology: Neural Reuse and the Interactive Brain, calls for a re-conceptualization of how we understand and study brains and minds. Neural reuse is the phenomenon that any given brain area is active for multiple cognitive functions, and partners with different sets of brain areas to carry out different cognitive functions. We discuss the implications for this, and other topics in Michael's research and the book, like evolution, embodied cognition, and Gibsonian perception. Michael also fields guest questions from John Krakauer and Alex Gomez-Marin, about representations and metaphysics, respectively.






Michael's website.



Twitter: @mljanderson.



Book:

After Phrenology: Neural Reuse and the Interactive Brain.





Related papers

Neural reuse: a fundamental organizational principle of the brain.



Some dilemmas for an account of neural representation: A reply to Poldrack.



Debt-free intelligence: Ecological information in minds and machines



Describing functional diversity of brain regions and brain networks.






0:00 - Intro
3:02 - After Phrenology
13:18 - Typical neuroscience experiment
16:29 - Neural reuse
18:37 - 4E cognition and representations
22:48 - John Krakauer question
27:38 - Gibsonian perception
36:17 - Autoencoders without representations
49:22 - Pluralism
52:42 - Alex Gomez-Marin question - metaphysics
1:01:26 - Stimulus-response historical neuroscience
1:10:59 - After Phrenology influence
1:19:24 - Origins of neural reuse
1:35:25 - The way forward]]>
                </itunes:summary>
                                                                            <itunes:duration>01:45:11</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 151 Steve Byrnes: Brain-like AGI Safety]]>
                </title>
                <pubDate>Sun, 30 Oct 2022 16:48:42 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1308020</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-151-steve-byrnes-brain-like-agi-safety</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Steve Byrnes is a physicist turned AGI safety researcher. He's concerned that when we create AGI, whenever and however that might happen, we run the risk of creating it in a less than perfectly safe way. AGI safety (AGI not doing something bad) is a wide net that encompasses AGI alignment (AGI doing what we want it to do). We discuss a host of ideas Steve writes about in his <a href="https://www.alignmentforum.org/s/HzcM2dkCq7fwXBej8">Intro to Brain-Like-AGI Safety</a> blog series, which uses what he has learned about brains to address how we might safely make AGI.</p>







<ul><li><a href="https://sjbyrnes.com/index.html">Steve's website</a>.</li><li>Twitter: <a href="https://twitter.com/steve47285">@steve47285</a></li><li><a href="https://www.alignmentforum.org/s/HzcM2dkCq7fwXBej8">Intro to Brain-Like-AGI Safety</a>.</li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.










Steve Byrnes is a physicist turned AGI safety researcher. He's concerned that when we create AGI, whenever and however that might happen, we run the risk of creating it in a less than perfectly safe way. AGI safety (AGI not doing something bad) is a wide net that encompasses AGI alignment (AGI doing what we want it to do). We discuss a host of ideas Steve writes about in his Intro to Brain-Like-AGI Safety blog series, which uses what he has learned about brains to address how we might safely make AGI.







Steve's website.Twitter: @steve47285Intro to Brain-Like-AGI Safety.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 151 Steve Byrnes: Brain-like AGI Safety]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Steve Byrnes is a physicist turned AGI safety researcher. He's concerned that when we create AGI, whenever and however that might happen, we run the risk of creating it in a less than perfectly safe way. AGI safety (AGI not doing something bad) is a wide net that encompasses AGI alignment (AGI doing what we want it to do). We discuss a host of ideas Steve writes about in his <a href="https://www.alignmentforum.org/s/HzcM2dkCq7fwXBej8">Intro to Brain-Like-AGI Safety</a> blog series, which uses what he has learned about brains to address how we might safely make AGI.</p>







<ul><li><a href="https://sjbyrnes.com/index.html">Steve's website</a>.</li><li>Twitter: <a href="https://twitter.com/steve47285">@steve47285</a></li><li><a href="https://www.alignmentforum.org/s/HzcM2dkCq7fwXBej8">Intro to Brain-Like-AGI Safety</a>.</li></ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/38a56d8b-586b-4897-a9cf-ebe9e5c5f329-151-Steve-Byrnes.mp3" length="87930401"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.










Steve Byrnes is a physicist turned AGI safety researcher. He's concerned that when we create AGI, whenever and however that might happen, we run the risk of creating it in a less than perfectly safe way. AGI safety (AGI not doing something bad) is a wide net that encompasses AGI alignment (AGI doing what we want it to do). We discuss a host of ideas Steve writes about in his Intro to Brain-Like-AGI Safety blog series, which uses what he has learned about brains to address how we might safely make AGI.







Steve's website.Twitter: @steve47285Intro to Brain-Like-AGI Safety.]]>
                </itunes:summary>
                                                                            <itunes:duration>01:31:17</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 150 Dan Nicholson: Machines, Organisms, Processes]]>
                </title>
                <pubDate>Sat, 15 Oct 2022 17:48:12 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1294629</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-150-dan-nicholson-machines-organisms-processes</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>


<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>










<p>Dan Nicholson is a philosopher at George Mason University. He incorporates the history of science and philosophy into modern analyses of our conceptions of processes related to life and organisms. He is also interested in re-orienting our conception of the universe as made fundamentally of things/substances, and replacing it with the idea the universe is made fundamentally of processes (process philosophy). In this episode, we discuss both of those subjects, why the "machine conception of the organism" is incorrect, how to apply these ideas to topics like neuroscience and artificial intelligence, and much more.</p>





<ul><li><a href="https://philosophy.gmu.edu/people/dnicho">Dan's website</a>. <a href="https://scholar.google.com/citations?hl=en&amp;user=5gxpRPYAAAAJ&amp;view_op=list_works&amp;sortby=pubdate">Google Scholar</a>.</li><li>Twitter: <a href="https://twitter.com/NicholsonHPBio">@NicholsonHPBio</a></li><li>Book<ul><li><a href="https://amzn.to/3CNSjqu">Everything Flows: Towards a Processual Philosophy of Biology</a>.</li></ul></li><li>Related papers<ul><li><a href="https://philpapers.org/archive/NICITC.pdf">Is the Cell Really a Machine?</a></li><li><a href="https://philarchive.org/archive/NICTMC-2">The Machine Conception of the Organism in Development and Evolution: A Critical Analysis</a>.</li><li><a href="https://philpapers.org/archive/NICOBT-2.pdf">On Being the Right Size, Revisited: The Problem with Engineering Metaphors in Molecular Biology</a>.</li></ul></li><li>Related episode: <a href="https://braininspired.co/podcast/118/">BI 118 Johannes Jäger: Beyond Networks</a>.</li></ul>



<p>0:00 - Intro
2:49 - Philosophy and science
16:37 - Role of history
23:28 - What Is Life? And interaction with James Watson
38:37 - Arguments against the machine conception of organisms
49:08 - Organisms as streams (processes)
57:52 - Process philosophy
1:08:59 - Alfred North Whitehead
1:12:45 - Process and consciousness
1:22:16 - Artificial intelligence and process
1:31:47 - Language and symbols and processes</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.






Check out my free video series about what's missing in AI and Neuroscience










Dan Nicholson is a philosopher at George Mason University. He incorporates the history of science and philosophy into modern analyses of our conceptions of processes related to life and organisms. He is also interested in re-orienting our conception of the universe as made fundamentally of things/substances, and replacing it with the idea the universe is made fundamentally of processes (process philosophy). In this episode, we discuss both of those subjects, why the "machine conception of the organism" is incorrect, how to apply these ideas to topics like neuroscience and artificial intelligence, and much more.





Dan's website. Google Scholar.Twitter: @NicholsonHPBioBookEverything Flows: Towards a Processual Philosophy of Biology.Related papersIs the Cell Really a Machine?The Machine Conception of the Organism in Development and Evolution: A Critical Analysis.On Being the Right Size, Revisited: The Problem with Engineering Metaphors in Molecular Biology.Related episode: BI 118 Johannes Jäger: Beyond Networks.



0:00 - Intro
2:49 - Philosophy and science
16:37 - Role of history
23:28 - What Is Life? And interaction with James Watson
38:37 - Arguments against the machine conception of organisms
49:08 - Organisms as streams (processes)
57:52 - Process philosophy
1:08:59 - Alfred North Whitehead
1:12:45 - Process and consciousness
1:22:16 - Artificial intelligence and process
1:31:47 - Language and symbols and processes]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 150 Dan Nicholson: Machines, Organisms, Processes]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>


<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>










<p>Dan Nicholson is a philosopher at George Mason University. He incorporates the history of science and philosophy into modern analyses of our conceptions of processes related to life and organisms. He is also interested in re-orienting our conception of the universe as made fundamentally of things/substances, and replacing it with the idea the universe is made fundamentally of processes (process philosophy). In this episode, we discuss both of those subjects, why the "machine conception of the organism" is incorrect, how to apply these ideas to topics like neuroscience and artificial intelligence, and much more.</p>





<ul><li><a href="https://philosophy.gmu.edu/people/dnicho">Dan's website</a>. <a href="https://scholar.google.com/citations?hl=en&amp;user=5gxpRPYAAAAJ&amp;view_op=list_works&amp;sortby=pubdate">Google Scholar</a>.</li><li>Twitter: <a href="https://twitter.com/NicholsonHPBio">@NicholsonHPBio</a></li><li>Book<ul><li><a href="https://amzn.to/3CNSjqu">Everything Flows: Towards a Processual Philosophy of Biology</a>.</li></ul></li><li>Related papers<ul><li><a href="https://philpapers.org/archive/NICITC.pdf">Is the Cell Really a Machine?</a></li><li><a href="https://philarchive.org/archive/NICTMC-2">The Machine Conception of the Organism in Development and Evolution: A Critical Analysis</a>.</li><li><a href="https://philpapers.org/archive/NICOBT-2.pdf">On Being the Right Size, Revisited: The Problem with Engineering Metaphors in Molecular Biology</a>.</li></ul></li><li>Related episode: <a href="https://braininspired.co/podcast/118/">BI 118 Johannes Jäger: Beyond Networks</a>.</li></ul>



<p>0:00 - Intro
2:49 - Philosophy and science
16:37 - Role of history
23:28 - What Is Life? And interaction with James Watson
38:37 - Arguments against the machine conception of organisms
49:08 - Organisms as streams (processes)
57:52 - Process philosophy
1:08:59 - Alfred North Whitehead
1:12:45 - Process and consciousness
1:22:16 - Artificial intelligence and process
1:31:47 - Language and symbols and processes</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/5ad2a433-295e-4ac8-8446-4f1b8ba339ff-150-Dan-Nicholson-public.mp3" length="94839622"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.






Check out my free video series about what's missing in AI and Neuroscience










Dan Nicholson is a philosopher at George Mason University. He incorporates the history of science and philosophy into modern analyses of our conceptions of processes related to life and organisms. He is also interested in re-orienting our conception of the universe as made fundamentally of things/substances, and replacing it with the idea the universe is made fundamentally of processes (process philosophy). In this episode, we discuss both of those subjects, why the "machine conception of the organism" is incorrect, how to apply these ideas to topics like neuroscience and artificial intelligence, and much more.





Dan's website. Google Scholar.Twitter: @NicholsonHPBioBookEverything Flows: Towards a Processual Philosophy of Biology.Related papersIs the Cell Really a Machine?The Machine Conception of the Organism in Development and Evolution: A Critical Analysis.On Being the Right Size, Revisited: The Problem with Engineering Metaphors in Molecular Biology.Related episode: BI 118 Johannes Jäger: Beyond Networks.



0:00 - Intro
2:49 - Philosophy and science
16:37 - Role of history
23:28 - What Is Life? And interaction with James Watson
38:37 - Arguments against the machine conception of organisms
49:08 - Organisms as streams (processes)
57:52 - Process philosophy
1:08:59 - Alfred North Whitehead
1:12:45 - Process and consciousness
1:22:16 - Artificial intelligence and process
1:31:47 - Language and symbols and processes]]>
                </itunes:summary>
                                                                            <itunes:duration>01:38:29</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 149 William B. Miller: Cell Intelligence]]>
                </title>
                <pubDate>Wed, 05 Oct 2022 17:20:46 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://permalink.castos.com/podcast/330/episode/1288812</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-149-william-b-miller-cell-intelligence</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>William B. Miller is an ex-physician turned evolutionary biologist. In this episode, we discuss topics related to his new book, <a href="https://amzn.to/3d99TLm">Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions</a>. The premise of the book is that all individual cells are intelligent in their own right, and possess a sense of self. From this, Bill makes the case that cells cooperate with other cells to engineer whole organisms that in turn serve as wonderful hosts for the myriad cell types. Further, our bodies are collections of our own cells (with our DNA), and an enormous amount and diversity of foreign cells - our microbiome - that communicate and cooperate with each other and with our own cells. We also discuss how cell intelligence compares to human intelligence, what Bill calls the "era of the cell" in science, how the future of medicine will harness the intelligence of cells and their cooperative nature, and much more.</p>





<ul><li><a href="https://www.ourbioverse.com/">William's website</a>.</li><li>Twitter: <a href="https://twitter.com/billmillermd?lang=en">@BillMillerMD</a>.</li><li>Book: <a href="https://amzn.to/3d99TLm">Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions</a>.</li></ul>



<p>0:00 - Intro
3:43 - Bioverse
7:29 - Bill's cell appreciation origins
17:03 - Microbiomes
27:01 - Complexity of microbiomes and the "Era of the cell"
46:00 - Robustness
55:05 - Cell vs. human intelligence
1:10:08 - Artificial intelligence
1:21:01 - Neuro-AI
1:25:53 - Hard problem of consciousness</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










William B. Miller is an ex-physician turned evolutionary biologist. In this episode, we discuss topics related to his new book, Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions. The premise of the book is that all individual cells are intelligent in their own right, and possess a sense of self. From this, Bill makes the case that cells cooperate with other cells to engineer whole organisms that in turn serve as wonderful hosts for the myriad cell types. Further, our bodies are collections of our own cells (with our DNA), and an enormous amount and diversity of foreign cells - our microbiome - that communicate and cooperate with each other and with our own cells. We also discuss how cell intelligence compares to human intelligence, what Bill calls the "era of the cell" in science, how the future of medicine will harness the intelligence of cells and their cooperative nature, and much more.





William's website.Twitter: @BillMillerMD.Book: Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions.



0:00 - Intro
3:43 - Bioverse
7:29 - Bill's cell appreciation origins
17:03 - Microbiomes
27:01 - Complexity of microbiomes and the "Era of the cell"
46:00 - Robustness
55:05 - Cell vs. human intelligence
1:10:08 - Artificial intelligence
1:21:01 - Neuro-AI
1:25:53 - Hard problem of consciousness]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 149 William B. Miller: Cell Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>William B. Miller is an ex-physician turned evolutionary biologist. In this episode, we discuss topics related to his new book, <a href="https://amzn.to/3d99TLm">Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions</a>. The premise of the book is that all individual cells are intelligent in their own right, and possess a sense of self. From this, Bill makes the case that cells cooperate with other cells to engineer whole organisms that in turn serve as wonderful hosts for the myriad cell types. Further, our bodies are collections of our own cells (with our DNA), and an enormous amount and diversity of foreign cells - our microbiome - that communicate and cooperate with each other and with our own cells. We also discuss how cell intelligence compares to human intelligence, what Bill calls the "era of the cell" in science, how the future of medicine will harness the intelligence of cells and their cooperative nature, and much more.</p>





<ul><li><a href="https://www.ourbioverse.com/">William's website</a>.</li><li>Twitter: <a href="https://twitter.com/billmillermd?lang=en">@BillMillerMD</a>.</li><li>Book: <a href="https://amzn.to/3d99TLm">Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions</a>.</li></ul>



<p>0:00 - Intro
3:43 - Bioverse
7:29 - Bill's cell appreciation origins
17:03 - Microbiomes
27:01 - Complexity of microbiomes and the "Era of the cell"
46:00 - Robustness
55:05 - Cell vs. human intelligence
1:10:08 - Artificial intelligence
1:21:01 - Neuro-AI
1:25:53 - Hard problem of consciousness</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/85f135e5-6704-4424-a940-47b9eb20e0eb-149-William-B.-Miller-public.mp3" length="90440241"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










William B. Miller is an ex-physician turned evolutionary biologist. In this episode, we discuss topics related to his new book, Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions. The premise of the book is that all individual cells are intelligent in their own right, and possess a sense of self. From this, Bill makes the case that cells cooperate with other cells to engineer whole organisms that in turn serve as wonderful hosts for the myriad cell types. Further, our bodies are collections of our own cells (with our DNA), and an enormous amount and diversity of foreign cells - our microbiome - that communicate and cooperate with each other and with our own cells. We also discuss how cell intelligence compares to human intelligence, what Bill calls the "era of the cell" in science, how the future of medicine will harness the intelligence of cells and their cooperative nature, and much more.





William's website.Twitter: @BillMillerMD.Book: Bioverse: How the Cellular World Contains the Secrets to Life's Biggest Questions.



0:00 - Intro
3:43 - Bioverse
7:29 - Bill's cell appreciation origins
17:03 - Microbiomes
27:01 - Complexity of microbiomes and the "Era of the cell"
46:00 - Robustness
55:05 - Cell vs. human intelligence
1:10:08 - Artificial intelligence
1:21:01 - Neuro-AI
1:25:53 - Hard problem of consciousness]]>
                </itunes:summary>
                                                                            <itunes:duration>01:33:54</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 148 Gaute Einevoll: Brain Simulations]]>
                </title>
                <pubDate>Sun, 25 Sep 2022 16:13:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-148-gaute-einevoll-brain-simulations</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-148-gaute-einevoll-brain-simulations</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Gaute Einevoll is a professor at the University of Oslo and Norwegian University of Life Sciences. He develops detailed models of brain networks to use as simulations, so neuroscientists can test their various theories and hypotheses about how networks implement various functions. Thus, the models are tools. The goal is to create models that are multi-level, to test questions at various levels of biological detail; and multi-modal, to predict the handful of signals neuroscientists measure from real brains (something Gaute calls "measurement physics"). We also discuss Gaute's thoughts on <a href="https://braininspired.co/podcast/141/">Carina Curto's "beautiful vs ugly models"</a>, and his reaction to <a href="https://braininspired.co/podcast/147/">Noah Hutton's In Silico documentary</a> about the Blue Brain and Human Brain projects (Gaute has been funded by the Human Brain Project since its inception).</p>



<ul><li><a href="https://www.mn.uio.no/compsci/english/people/supervisors/einevoll.html">Gaute's website</a>.</li><li>Twitter: <a href="https://twitter.com/gauteeinevoll">@GauteEinevoll</a>.</li><li>Related papers:<ul><li><a href="https://www.sciencedirect.com/science/article/pii/S0896627319302909?dgcid=api_sd_search-api-endpoint">The Scientific Case for Brain Simulations</a>.</li><li><a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1010353">Brain signal predictions from multi-scale networks using a linearized framework</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2022.02.22.481540v1">Uncovering circuit mechanisms of current sinks and sources with biophysical simulations of primary visual cortex</a></li></ul></li><li><a href="http://LFPy.github.io">LFPy</a>: a Python module for calculation of extracellular potentials from multicompartment neuron models.</li><li>Gaute's <a href="https://vettogvitenskap.no/senseandscience/">Sense and Science</a> podcast.</li></ul>



<p>0:00 - Intro
3:25 - Beautiful and messy models
6:34 - In Silico
9:47 - Goals of human brain project
15:50 - Brain simulation approach
21:35 - Degeneracy in parameters
26:24 - Abstract principles from simulations
32:58 - Models as tools
35:34 - Predicting brain signals
41:45 - LFPs closer to average
53:57 - Plasticity in simulations
56:53 - How detailed should we model neurons?
59:09 - Lessons from predicting signals
1:06:07 - Scaling up
1:10:54 - Simulation as a tool
1:12:35 - Oscillations
1:16:24 - Manifolds and simulations
1:20:22 - Modeling cortex like Hodgkin and Huxley</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Gaute Einevoll is a professor at the University of Oslo and Norwegian University of Life Sciences. He develops detailed models of brain networks to use as simulations, so neuroscientists can test their various theories and hypotheses about how networks implement various functions. Thus, the models are tools. The goal is to create models that are multi-level, to test questions at various levels of biological detail; and multi-modal, to predict the handful of signals neuroscientists measure from real brains (something Gaute calls "measurement physics"). We also discuss Gaute's thoughts on Carina Curto's "beautiful vs ugly models", and his reaction to Noah Hutton's In Silico documentary about the Blue Brain and Human Brain projects (Gaute has been funded by the Human Brain Project since its inception).



Gaute's website.Twitter: @GauteEinevoll.Related papers:The Scientific Case for Brain Simulations.Brain signal predictions from multi-scale networks using a linearized framework.Uncovering circuit mechanisms of current sinks and sources with biophysical simulations of primary visual cortexLFPy: a Python module for calculation of extracellular potentials from multicompartment neuron models.Gaute's Sense and Science podcast.



0:00 - Intro
3:25 - Beautiful and messy models
6:34 - In Silico
9:47 - Goals of human brain project
15:50 - Brain simulation approach
21:35 - Degeneracy in parameters
26:24 - Abstract principles from simulations
32:58 - Models as tools
35:34 - Predicting brain signals
41:45 - LFPs closer to average
53:57 - Plasticity in simulations
56:53 - How detailed should we model neurons?
59:09 - Lessons from predicting signals
1:06:07 - Scaling up
1:10:54 - Simulation as a tool
1:12:35 - Oscillations
1:16:24 - Manifolds and simulations
1:20:22 - Modeling cortex like Hodgkin and Huxley]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 148 Gaute Einevoll: Brain Simulations]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Gaute Einevoll is a professor at the University of Oslo and Norwegian University of Life Sciences. He develops detailed models of brain networks to use as simulations, so neuroscientists can test their various theories and hypotheses about how networks implement various functions. Thus, the models are tools. The goal is to create models that are multi-level, to test questions at various levels of biological detail; and multi-modal, to predict the handful of signals neuroscientists measure from real brains (something Gaute calls "measurement physics"). We also discuss Gaute's thoughts on <a href="https://braininspired.co/podcast/141/">Carina Curto's "beautiful vs ugly models"</a>, and his reaction to <a href="https://braininspired.co/podcast/147/">Noah Hutton's In Silico documentary</a> about the Blue Brain and Human Brain projects (Gaute has been funded by the Human Brain Project since its inception).</p>



<ul><li><a href="https://www.mn.uio.no/compsci/english/people/supervisors/einevoll.html">Gaute's website</a>.</li><li>Twitter: <a href="https://twitter.com/gauteeinevoll">@GauteEinevoll</a>.</li><li>Related papers:<ul><li><a href="https://www.sciencedirect.com/science/article/pii/S0896627319302909?dgcid=api_sd_search-api-endpoint">The Scientific Case for Brain Simulations</a>.</li><li><a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1010353">Brain signal predictions from multi-scale networks using a linearized framework</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2022.02.22.481540v1">Uncovering circuit mechanisms of current sinks and sources with biophysical simulations of primary visual cortex</a></li></ul></li><li><a href="http://LFPy.github.io">LFPy</a>: a Python module for calculation of extracellular potentials from multicompartment neuron models.</li><li>Gaute's <a href="https://vettogvitenskap.no/senseandscience/">Sense and Science</a> podcast.</li></ul>



<p>0:00 - Intro
3:25 - Beautiful and messy models
6:34 - In Silico
9:47 - Goals of human brain project
15:50 - Brain simulation approach
21:35 - Degeneracy in parameters
26:24 - Abstract principles from simulations
32:58 - Models as tools
35:34 - Predicting brain signals
41:45 - LFPs closer to average
53:57 - Plasticity in simulations
56:53 - How detailed should we model neurons?
59:09 - Lessons from predicting signals
1:06:07 - Scaling up
1:10:54 - Simulation as a tool
1:12:35 - Oscillations
1:16:24 - Manifolds and simulations
1:20:22 - Modeling cortex like Hodgkin and Huxley</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/2580bc51-1569-45d8-bb53-75adcca58df8-148-Gaute-Einevoll-public.mp3" length="86929883"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Gaute Einevoll is a professor at the University of Oslo and Norwegian University of Life Sciences. He develops detailed models of brain networks to use as simulations, so neuroscientists can test their various theories and hypotheses about how networks implement various functions. Thus, the models are tools. The goal is to create models that are multi-level, to test questions at various levels of biological detail; and multi-modal, to predict the handful of signals neuroscientists measure from real brains (something Gaute calls "measurement physics"). We also discuss Gaute's thoughts on Carina Curto's "beautiful vs ugly models", and his reaction to Noah Hutton's In Silico documentary about the Blue Brain and Human Brain projects (Gaute has been funded by the Human Brain Project since its inception).



Gaute's website.Twitter: @GauteEinevoll.Related papers:The Scientific Case for Brain Simulations.Brain signal predictions from multi-scale networks using a linearized framework.Uncovering circuit mechanisms of current sinks and sources with biophysical simulations of primary visual cortexLFPy: a Python module for calculation of extracellular potentials from multicompartment neuron models.Gaute's Sense and Science podcast.



0:00 - Intro
3:25 - Beautiful and messy models
6:34 - In Silico
9:47 - Goals of human brain project
15:50 - Brain simulation approach
21:35 - Degeneracy in parameters
26:24 - Abstract principles from simulations
32:58 - Models as tools
35:34 - Predicting brain signals
41:45 - LFPs closer to average
53:57 - Plasticity in simulations
56:53 - How detailed should we model neurons?
59:09 - Lessons from predicting signals
1:06:07 - Scaling up
1:10:54 - Simulation as a tool
1:12:35 - Oscillations
1:16:24 - Manifolds and simulations
1:20:22 - Modeling cortex like Hodgkin and Huxley]]>
                </itunes:summary>
                                                                            <itunes:duration>01:28:48</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 147 Noah Hutton: In Silico]]>
                </title>
                <pubDate>Tue, 13 Sep 2022 15:11:05 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-147-noah-hutton-in-silico</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-147-noah-hutton-in-silico</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Noah Hutton writes, directs, and scores documentary and narrative films. On this episode, we discuss his documentary In Silico. In 2009, Noah watched a TED talk by Henry Markram, in which Henry claimed it would take 10 years to fully simulate a human brain. This claim inspired Noah to chronicle the project, visiting Henry and his team periodically throughout. The result was In Silico, which tells the science, human, and social story of Henry's massively funded projects - the Blue Brain Project and the Human Brain Project.</p>





<ul><li><a href="https://insilicofilm.com/">In Silico website</a>.<ul><li><a href="https://vimeo.com/ondemand/insilico2" target="_blank" rel="noreferrer noopener">Rent or buy In Silico</a>.</li></ul></li><li><a href="http://noahhutton.com">Noah's website</a>.</li><li>Twitter: <a href="https://twitter.com/noah_hutton">@noah_hutton</a>.</li></ul>



<p>0:00 - Intro
3:36 - Release and premier
7:37 - Noah's background
9:52 - Origins of In Silico
19:39 - Recurring visits
22:13 - Including the critics
25:22 - Markram's shifting outlook and salesmanship
35:43 - Promises and delivery
41:28 - Computer and brain terms interchange
49:22 - Progress vs. illusion of progress
52:19 - Close to quitting
58:01 - Salesmanship vs bad at estimating timelines
1:02:12 - Brain simulation science
1:11:19 - AGI
1:14:48 - Brain simulation vs. neuro-AI
1:21:03 - Opinion on TED talks
1:25:16 - Hero worship
1:29:03 - Feedback on In Silico</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Noah Hutton writes, directs, and scores documentary and narrative films. On this episode, we discuss his documentary In Silico. In 2009, Noah watched a TED talk by Henry Markram, in which Henry claimed it would take 10 years to fully simulate a human brain. This claim inspired Noah to chronicle the project, visiting Henry and his team periodically throughout. The result was In Silico, which tells the science, human, and social story of Henry's massively funded projects - the Blue Brain Project and the Human Brain Project.





In Silico website.Rent or buy In Silico.Noah's website.Twitter: @noah_hutton.



0:00 - Intro
3:36 - Release and premier
7:37 - Noah's background
9:52 - Origins of In Silico
19:39 - Recurring visits
22:13 - Including the critics
25:22 - Markram's shifting outlook and salesmanship
35:43 - Promises and delivery
41:28 - Computer and brain terms interchange
49:22 - Progress vs. illusion of progress
52:19 - Close to quitting
58:01 - Salesmanship vs bad at estimating timelines
1:02:12 - Brain simulation science
1:11:19 - AGI
1:14:48 - Brain simulation vs. neuro-AI
1:21:03 - Opinion on TED talks
1:25:16 - Hero worship
1:29:03 - Feedback on In Silico]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 147 Noah Hutton: In Silico]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Noah Hutton writes, directs, and scores documentary and narrative films. On this episode, we discuss his documentary In Silico. In 2009, Noah watched a TED talk by Henry Markram, in which Henry claimed it would take 10 years to fully simulate a human brain. This claim inspired Noah to chronicle the project, visiting Henry and his team periodically throughout. The result was In Silico, which tells the science, human, and social story of Henry's massively funded projects - the Blue Brain Project and the Human Brain Project.</p>





<ul><li><a href="https://insilicofilm.com/">In Silico website</a>.<ul><li><a href="https://vimeo.com/ondemand/insilico2" target="_blank" rel="noreferrer noopener">Rent or buy In Silico</a>.</li></ul></li><li><a href="http://noahhutton.com">Noah's website</a>.</li><li>Twitter: <a href="https://twitter.com/noah_hutton">@noah_hutton</a>.</li></ul>



<p>0:00 - Intro
3:36 - Release and premier
7:37 - Noah's background
9:52 - Origins of In Silico
19:39 - Recurring visits
22:13 - Including the critics
25:22 - Markram's shifting outlook and salesmanship
35:43 - Promises and delivery
41:28 - Computer and brain terms interchange
49:22 - Progress vs. illusion of progress
52:19 - Close to quitting
58:01 - Salesmanship vs bad at estimating timelines
1:02:12 - Brain simulation science
1:11:19 - AGI
1:14:48 - Brain simulation vs. neuro-AI
1:21:03 - Opinion on TED talks
1:25:16 - Hero worship
1:29:03 - Feedback on In Silico</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/5d2da9c4-86c3-488b-b17e-1f8328b43223-147-Noah-Hutton.mp3" length="94725085"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Noah Hutton writes, directs, and scores documentary and narrative films. On this episode, we discuss his documentary In Silico. In 2009, Noah watched a TED talk by Henry Markram, in which Henry claimed it would take 10 years to fully simulate a human brain. This claim inspired Noah to chronicle the project, visiting Henry and his team periodically throughout. The result was In Silico, which tells the science, human, and social story of Henry's massively funded projects - the Blue Brain Project and the Human Brain Project.





In Silico website.Rent or buy In Silico.Noah's website.Twitter: @noah_hutton.



0:00 - Intro
3:36 - Release and premier
7:37 - Noah's background
9:52 - Origins of In Silico
19:39 - Recurring visits
22:13 - Including the critics
25:22 - Markram's shifting outlook and salesmanship
35:43 - Promises and delivery
41:28 - Computer and brain terms interchange
49:22 - Progress vs. illusion of progress
52:19 - Close to quitting
58:01 - Salesmanship vs bad at estimating timelines
1:02:12 - Brain simulation science
1:11:19 - AGI
1:14:48 - Brain simulation vs. neuro-AI
1:21:03 - Opinion on TED talks
1:25:16 - Hero worship
1:29:03 - Feedback on In Silico]]>
                </itunes:summary>
                                                                            <itunes:duration>01:37:08</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 146 Lauren Ross: Causal and Non-Causal Explanation]]>
                </title>
                <pubDate>Wed, 07 Sep 2022 14:35:40 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/146-lauren-ross-causal-and-non-causal-explanation</guid>
                                    <link>https://brain-inspired.castos.com/episodes/146-lauren-ross-causal-and-non-causal-explanation</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Lauren Ross is an Associate Professor at the University of California, Irvine. She studies and writes about causal and non-causal explanations in philosophy of science, including distinctions among causal structures. Throughout her work, Lauren employs James Woodward's interventionist approach to causation, which <a href="https://braininspired.co/podcast/145/">Jim and I discussed in episode 145</a>. In this episode, we discuss Jim's lasting impact on the philosophy of causation, the current dominance of mechanistic explanation and its relation to causation, and various causal structures of explanation, including pathways, cascades, topology, and constraints.</p>



<ul><li><a href="https://www.lps.uci.edu/~rossl/">Lauren's website</a>.</li><li>Twitter: <a href="https://twitter.com/ProfLaurenRoss">@ProfLaurenRoss</a></li><li>Related papers<ul><li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2022/07/A-call-for-more-clarity-around-causality-in-neuroscience-TINS-2022.pdf">A call for more clarity around causality in neuroscience</a>.</li><li><a href="http://philsci-archive.pitt.edu/18504/1/Constraints_Ross.pdf">The explanatory nature of constraints: Law-based, mathematical, and causal</a>.</li><li><a href="http://philsci-archive.pitt.edu/14432/1/Mech_Path_.pdf">Causal Concepts in Biology: How Pathways Differ from Mechanisms and Why It Matters</a>.</li><li><a href="https://www.lps.uci.edu/~rossl/A11_Ross.pdf">Distinguishing topological and causal explanation</a>.</li><li><a href="https://www.lps.uci.edu/~rossl/A9_Ross.pdf">Multiple Realizability from a Causal Perspective</a>.</li><li><a href="http://philsci-archive.pitt.edu/20215/1/Ross_Cascade.pdf">Cascade versus mechanism: The diversity of causal structure in science</a>.</li></ul></li></ul>



<p>0:00 - Intro
2:46 - Lauren's background
10:14 - Jim Woodward legacy
15:37 - Golden era of causality
18:56 - Mechanistic explanation
28:51 - Pathways
31:41 - Cascades
36:25 - Topology
41:17 - Constraint
50:44 - Hierarchy of explanations
53:18 - Structure and function
57:49 - Brain and mind
1:01:28 - Reductionism
1:07:58 - Constraint again
1:14:38 - Multiple realizability</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Lauren Ross is an Associate Professor at the University of California, Irvine. She studies and writes about causal and non-causal explanations in philosophy of science, including distinctions among causal structures. Throughout her work, Lauren employs James Woodward's interventionist approach to causation, which Jim and I discussed in episode 145. In this episode, we discuss Jim's lasting impact on the philosophy of causation, the current dominance of mechanistic explanation and its relation to causation, and various causal structures of explanation, including pathways, cascades, topology, and constraints.



Lauren's website.Twitter: @ProfLaurenRossRelated papersA call for more clarity around causality in neuroscience.The explanatory nature of constraints: Law-based, mathematical, and causal.Causal Concepts in Biology: How Pathways Differ from Mechanisms and Why It Matters.Distinguishing topological and causal explanation.Multiple Realizability from a Causal Perspective.Cascade versus mechanism: The diversity of causal structure in science.



0:00 - Intro
2:46 - Lauren's background
10:14 - Jim Woodward legacy
15:37 - Golden era of causality
18:56 - Mechanistic explanation
28:51 - Pathways
31:41 - Cascades
36:25 - Topology
41:17 - Constraint
50:44 - Hierarchy of explanations
53:18 - Structure and function
57:49 - Brain and mind
1:01:28 - Reductionism
1:07:58 - Constraint again
1:14:38 - Multiple realizability]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 146 Lauren Ross: Causal and Non-Causal Explanation]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Lauren Ross is an Associate Professor at the University of California, Irvine. She studies and writes about causal and non-causal explanations in philosophy of science, including distinctions among causal structures. Throughout her work, Lauren employs Jame's Woodward's interventionist approach to causation, which <a href="https://braininspired.co/podcast/145/">Jim and I discussed in episode 145</a>. In this episode, we discuss Jim's lasting impact on the philosophy of causation, the current dominance of mechanistic explanation and its relation to causation, and various causal structures of explanation, including pathways, cascades, topology, and constraints.</p>



<ul><li><a href="https://www.lps.uci.edu/~rossl/">Lauren's website</a>.</li><li>Twitter: <a href="https://twitter.com/ProfLaurenRoss">@ProfLaurenRoss</a></li><li>Related papers<ul><li><a href="https://ekmillerlab.mit.edu/wp-content/uploads/2022/07/A-call-for-more-clarity-around-causality-in-neuroscience-TINS-2022.pdf">A call for more clarity around causality in neuroscience</a>.</li><li><a href="http://philsci-archive.pitt.edu/18504/1/Constraints_Ross.pdf">The explanatory nature of constraints: Law-based, mathematical, and causal</a>.</li><li><a href="http://philsci-archive.pitt.edu/14432/1/Mech_Path_.pdf">Causal Concepts in Biology: How Pathways Differ from Mechanisms and Why It Matters</a>.</li><li><a href="https://www.lps.uci.edu/~rossl/A11_Ross.pdf">Distinguishing topological and causal explanation</a>.</li><li><a href="https://www.lps.uci.edu/~rossl/A9_Ross.pdf">Multiple Realizability from a Causal Perspective</a>.</li><li><a href="http://philsci-archive.pitt.edu/20215/1/Ross_Cascade.pdf">Cascade versus mechanism: The diversity of causal structure in science</a>.</li></ul></li></ul>



<p>0:00 - Intro
2:46 - Lauren's background
10:14 - Jim Woodward legacy
15:37 - Golden era of causality
18:56 - Mechanistic explanation
28:51 - Pathways
31:41 - Cascades
36:25 - Topology
41:17 - Constraint
50:44 - Hierarchy of explanations
53:18 - Structure and function
57:49 - Brain and mind
1:01:28 - Reductionism
1:07:58 - Constraint again
1:14:38 - Multiple realizability</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/9d346bd2-4145-4d04-ade7-675b42119cce-146-Lauren-Ross-public.mp3" length="79834106"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Lauren Ross is an Associate Professor at the University of California, Irvine. She studies and writes about causal and non-causal explanations in philosophy of science, including distinctions among causal structures. Throughout her work, Lauren employs James Woodward's interventionist approach to causation, which Jim and I discussed in episode 145. In this episode, we discuss Jim's lasting impact on the philosophy of causation, the current dominance of mechanistic explanation and its relation to causation, and various causal structures of explanation, including pathways, cascades, topology, and constraints.



Lauren's website.Twitter: @ProfLaurenRossRelated papersA call for more clarity around causality in neuroscience.The explanatory nature of constraints: Law-based, mathematical, and causal.Causal Concepts in Biology: How Pathways Differ from Mechanisms and Why It Matters.Distinguishing topological and causal explanation.Multiple Realizability from a Causal Perspective.Cascade versus mechanism: The diversity of causal structure in science.



0:00 - Intro
2:46 - Lauren's background
10:14 - Jim Woodward legacy
15:37 - Golden era of causality
18:56 - Mechanistic explanation
28:51 - Pathways
31:41 - Cascades
36:25 - Topology
41:17 - Constraint
50:44 - Hierarchy of explanations
53:18 - Structure and function
57:49 - Brain and mind
1:01:28 - Reductionism
1:07:58 - Constraint again
1:14:38 - Multiple realizability]]>
                </itunes:summary>
                                                                            <itunes:duration>01:22:51</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 145 James Woodward: Causation with a Human Face]]>
                </title>
                <pubDate>Sun, 28 Aug 2022 21:03:37 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-145-james-woodward-causation-with-a-human-face</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-145-james-woodward-causation-with-a-human-face</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>James Woodward is a recently retired Professor from the Department of History and Philosophy of Science at the University of Pittsburgh. Jim has tremendously influenced the field of causal explanation in the philosophy of science. His account of causation centers around intervention - intervening on a cause should alter its effect. From this minimal notion, Jim has described many facets and varieties of causal structures. In this episode, we discuss topics from his recent book, <a href="https://amzn.to/3pMAwZv">Causation with a Human Face: Normative Theory and Descriptive Psychology</a>. In the book, Jim advocates that how we should think about causality - the normative - needs to be studied together with how we actually do think about causal relations in the world - the descriptive. We discuss many topics around this central notion, epistemology versus metaphysics, the nature and varieties of causal structures.</p>





<ul><li><a href="https://www.jameswoodward.org/">Jim's website</a>.</li><li><a href="https://amzn.to/3QxPMEZ">Making Things Happen: A Theory of Causal Explanation</a>.</li><li><a href="https://amzn.to/3pMAwZv">Causation with a Human Face: Normative Theory and Descriptive Psychology</a>.</li></ul>



<p>0:00 - Intro
4:14 - Causation with a Human Face &amp; Functionalist approach
6:16 - Interventionist causality; Epistemology and metaphysics
9:35 - Normative and descriptive
14:02 - Rationalist approach
20:24 - Normative vs. descriptive
28:00 - Varying notions of causation
33:18 - Invariance
41:05 - Causality in complex systems
47:09 - Downward causation
51:14 - Natural laws
56:38 - Proportionality
1:01:12 - Intuitions
1:10:59 - Normative and descriptive relation
1:17:33 - Causality across disciplines
1:21:26 - What would help our understanding of causation</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










James Woodward is a recently retired Professor from the Department of History and Philosophy of Science at the University of Pittsburgh. Jim has tremendously influenced the field of causal explanation in the philosophy of science. His account of causation centers around intervention - intervening on a cause should alter its effect. From this minimal notion, Jim has described many facets and varieties of causal structures. In this episode, we discuss topics from his recent book, Causation with a Human Face: Normative Theory and Descriptive Psychology. In the book, Jim advocates that how we should think about causality - the normative - needs to be studied together with how we actually do think about causal relations in the world - the descriptive. We discuss many topics around this central notion, epistemology versus metaphysics, the nature and varieties of causal structures.





Jim's website.Making Things Happen: A Theory of Causal Explanation.Causation with a Human Face: Normative Theory and Descriptive Psychology.



0:00 - Intro
4:14 - Causation with a Human Face & Functionalist approach
6:16 - Interventionist causality; Epistemology and metaphysics
9:35 - Normative and descriptive
14:02 - Rationalist approach
20:24 - Normative vs. descriptive
28:00 - Varying notions of causation
33:18 - Invariance
41:05 - Causality in complex systems
47:09 - Downward causation
51:14 - Natural laws
56:38 - Proportionality
1:01:12 - Intuitions
1:10:59 - Normative and descriptive relation
1:17:33 - Causality across disciplines
1:21:26 - What would help our understanding of causation]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 145 James Woodward: Causation with a Human Face]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>James Woodward is a recently retired Professor from the Department of History and Philosophy of Science at the University of Pittsburgh. Jim has tremendously influenced the field of causal explanation in the philosophy of science. His account of causation centers around intervention - intervening on a cause should alter its effect. From this minimal notion, Jim has described many facets and varieties of causal structures. In this episode, we discuss topics from his recent book, <a href="https://amzn.to/3pMAwZv">Causation with a Human Face: Normative Theory and Descriptive Psychology</a>. In the book, Jim advocates that how we should think about causality - the normative - needs to be studied together with how we actually do think about causal relations in the world - the descriptive. We discuss many topics around this central notion, epistemology versus metaphysics, the nature and varieties of causal structures.</p>





<ul><li><a href="https://www.jameswoodward.org/">Jim's website</a>.</li><li><a href="https://amzn.to/3QxPMEZ">Making Things Happen: A Theory of Causal Explanation</a>.</li><li><a href="https://amzn.to/3pMAwZv">Causation with a Human Face: Normative Theory and Descriptive Psychology</a>.</li></ul>



<p>0:00 - Intro
4:14 - Causation with a Human Face &amp; Functionalist approach
6:16 - Interventionist causality; Epistemology and metaphysics
9:35 - Normative and descriptive
14:02 - Rationalist approach
20:24 - Normative vs. descriptive
28:00 - Varying notions of causation
33:18 - Invariance
41:05 - Causality in complex systems
47:09 - Downward causation
51:14 - Natural laws
56:38 - Proportionality
1:01:12 - Intuitions
1:10:59 - Normative and descriptive relation
1:17:33 - Causality across disciplines
1:21:26 - What would help our understanding of causation</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/842c91a9-340f-46fc-9333-1261ce7bca72-145-James-Woodward-public.mp3" length="82739467"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










James Woodward is a recently retired Professor from the Department of History and Philosophy of Science at the University of Pittsburgh. Jim has tremendously influenced the field of causal explanation in the philosophy of science. His account of causation centers around intervention - intervening on a cause should alter its effect. From this minimal notion, Jim has described many facets and varieties of causal structures. In this episode, we discuss topics from his recent book, Causation with a Human Face: Normative Theory and Descriptive Psychology. In the book, Jim advocates that how we should think about causality - the normative - needs to be studied together with how we actually do think about causal relations in the world - the descriptive. We discuss many topics around this central notion, epistemology versus metaphysics, the nature and varieties of causal structures.





Jim's website.Making Things Happen: A Theory of Causal Explanation.Causation with a Human Face: Normative Theory and Descriptive Psychology.



0:00 - Intro
4:14 - Causation with a Human Face & Functionalist approach
6:16 - Interventionist causality; Epistemology and metaphysics
9:35 - Normative and descriptive
14:02 - Rationalist approach
20:24 - Normative vs. descriptive
28:00 - Varying notions of causation
33:18 - Invariance
41:05 - Causality in complex systems
47:09 - Downward causation
51:14 - Natural laws
56:38 - Proportionality
1:01:12 - Intuitions
1:10:59 - Normative and descriptive relation
1:17:33 - Causality across disciplines
1:21:26 - What would help our understanding of causation]]>
                </itunes:summary>
                                                                            <itunes:duration>01:25:52</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 144 Emily M. Bender and Ev Fedorenko: Large Language Models]]>
                </title>
                <pubDate>Wed, 17 Aug 2022 16:25:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-144-emily-m-bender-and-ev-fedorenko-large-langu0gt</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-144-emily-m-bender-and-ev-fedorenko-large-langu0gt</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my short video series about what's missing in AI and Neuroscience.</a></p>





<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>




<p>Large language models, often now called "foundation models", are the model du jour in AI, based on the <a href="https://en.wikipedia.org/wiki/Transformer_(machine_learning_model)">transformer architecture</a>. In this episode, I bring together Evelina Fedorenko and Emily M. Bender to discuss how language models stack up to our own language processing and generation (models and brains both excel at next-word prediction), whether language evolved in humans for complex thoughts or for communication (communication, says Ev), whether language models grasp the meaning of the text they produce (Emily says no), and much more.</p>





<p>Evelina Fedorenko is a cognitive scientist who runs the EvLab at MIT. She studies the neural basis of language. Her lab has amassed a large amount of data suggesting language did not evolve to help us think complex thoughts, as Noam Chomsky has argued, but rather for efficient communication. She has also recently been comparing the activity in language models to activity in our brain's language network, finding commonality in the ability to predict upcoming words.</p>





<p>Emily M. Bender is a computational linguist at University of Washington. Recently she has been considering questions about whether language models understand the meaning of the language they produce (no), whether we should be scaling language models as is the current practice (not really), how linguistics can inform language models, and more.</p>



<ul><li><a href="http://evlab.mit.edu/">EvLab</a>.</li><li><a href="http://faculty.washington.edu/ebender/">Emily's website</a>.</li><li>Twitter: <a href="https://twitter.com/ev_fedorenko">@ev_fedorenko</a>; <a href="https://twitter.com/emilymbender">@emilymbender</a>.</li><li>Related papers<ul><li><a href="http://evlab.mit.edu/assets/papers/Fedorenko_%26_Varley_2016_ANYAS.pdf">Language and thought are not the same thing: Evidence from neuroimaging and neurological patients</a>. (Fedorenko)</li><li><a href="http://evlab.mit.edu/assets/papers/Schrimpf_et_al_2021_PNAS.pdf">The neural architecture of language: Integrative modeling converges on predictive processing.</a> (Fedorenko)</li><li><a href="https://dl.acm.org/doi/10.1145/3442188.3445922">On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?</a> (Bender)</li><li><a href="https://aclanthology.org/2020.acl-main.463/">Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data</a> (Bender)</li></ul></li></ul>



<p>0:00 - Intro
4:35 - Language and cognition
15:38 - Grasping for meaning
21:32 - Are large language models producing language?
23:09 - Next-word prediction in brains and models
32:09 - Interface between language and thought
35:18 - Studying language in nonhuman animals
41:54 - Do we understand language enough?
45:51 - What do language models need?
51:45 - Are LLMs teaching us about language?
54:56 - Is meaning necessary, and does it matter how we learn language?
1:00:04 - Is our biology important for language?
1:04:59 - Future outlook</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.





Support the show to get full episodes and join the Discord community.








Large language models, often now called "foundation models", are the model du jour in AI, based on the transformer architecture. In this episode, I bring together Evelina Fedorenko and Emily M. Bender to discuss how language models stack up to our own language processing and generation (models and brains both excel at next-word prediction), whether language evolved in humans for complex thoughts or for communication (communication, says Ev), whether language models grasp the meaning of the text they produce (Emily says no), and much more.





Evelina Fedorenko is a cognitive scientist who runs the EvLab at MIT. She studies the neural basis of language. Her lab has amassed a large amount of data suggesting language did not evolve to help us think complex thoughts, as Noam Chomsky has argued, but rather for efficient communication. She has also recently been comparing the activity in language models to activity in our brain's language network, finding commonality in the ability to predict upcoming words.





Emily M. Bender is a computational linguist at University of Washington. Recently she has been considering questions about whether language models understand the meaning of the language they produce (no), whether we should be scaling language models as is the current practice (not really), how linguistics can inform language models, and more.



EvLab.Emily's website.Twitter: @ev_fedorenko; @emilymbender.Related papersLanguage and thought are not the same thing: Evidence from neuroimaging and neurological patients. (Fedorenko)The neural architecture of language: Integrative modeling converges on predictive processing. (Fedorenko)On the Dangers of Stochastic Parrots: Can Language Models Be Too Big? (Bender)Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data (Bender)



0:00 - Intro
4:35 - Language and cognition
15:38 - Grasping for meaning
21:32 - Are large language models producing language?
23:09 - Next-word prediction in brains and models
32:09 - Interface between language and thought
35:18 - Studying language in nonhuman animals
41:54 - Do we understand language enough?
45:51 - What do language models need?
51:45 - Are LLMs teaching us about language?
54:56 - Is meaning necessary, and does it matter how we learn language?
1:00:04 - Is our biology important for language?
1:04:59 - Future outlook]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 144 Emily M. Bender and Ev Fedorenko: Large Language Models]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my short video series about what's missing in AI and Neuroscience.</a></p>





<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>




<p>Large language models, often now called "foundation models", are the model du jour in AI, based on the <a href="https://en.wikipedia.org/wiki/Transformer_(machine_learning_model)">transformer architecture</a>. In this episode, I bring together Evelina Fedorenko and Emily M. Bender to discuss how language models stack up to our own language processing and generation (models and brains both excel at next-word prediction), whether language evolved in humans for complex thoughts or for communication (communication, says Ev), whether language models grasp the meaning of the text they produce (Emily says no), and much more.</p>





<p>Evelina Fedorenko is a cognitive scientist who runs the EvLab at MIT. She studies the neural basis of language. Her lab has amassed a large amount of data suggesting language did not evolve to help us think complex thoughts, as Noam Chomsky has argued, but rather for efficient communication. She has also recently been comparing the activity in language models to activity in our brain's language network, finding commonality in the ability to predict upcoming words.</p>





<p>Emily M. Bender is a computational linguist at University of Washington. Recently she has been considering questions about whether language models understand the meaning of the language they produce (no), whether we should be scaling language models as is the current practice (not really), how linguistics can inform language models, and more.</p>



<ul><li><a href="http://evlab.mit.edu/">EvLab</a>.</li><li><a href="http://faculty.washington.edu/ebender/">Emily's website</a>.</li><li>Twitter: <a href="https://twitter.com/ev_fedorenko">@ev_fedorenko</a>; <a href="https://twitter.com/emilymbender">@emilymbender</a>.</li><li>Related papers<ul><li><a href="http://evlab.mit.edu/assets/papers/Fedorenko_%26_Varley_2016_ANYAS.pdf">Language and thought are not the same thing: Evidence from neuroimaging and neurological patients</a>. (Fedorenko)</li><li><a href="http://evlab.mit.edu/assets/papers/Schrimpf_et_al_2021_PNAS.pdf">The neural architecture of language: Integrative modeling converges on predictive processing.</a> (Fedorenko)</li><li><a href="https://dl.acm.org/doi/10.1145/3442188.3445922">On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?</a> (Bender)</li><li><a href="https://aclanthology.org/2020.acl-main.463/">Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data</a> (Bender)</li></ul></li></ul>



<p>0:00 - Intro
4:35 - Language and cognition
15:38 - Grasping for meaning
21:32 - Are large language models producing language?
23:09 - Next-word prediction in brains and models
32:09 - Interface between language and thought
35:18 - Studying language in nonhuman animals
41:54 - Do we understand language enough?
45:51 - What do language models need?
51:45 - Are LLMs teaching us about language?
54:56 - Is meaning necessary, and does it matter how we learn language?
1:00:04 - Is our biology important for language?
1:04:59 - Future outlook</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/a976855a-58b4-42da-bc6f-6aa05fd5cdca-144-Emily-Bender-Ev-Fedorenko.mp3" length="69116190"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.





Support the show to get full episodes and join the Discord community.








Large language models, often now called "foundation models", are the model du jour in AI, based on the transformer architecture. In this episode, I bring together Evelina Fedorenko and Emily M. Bender to discuss how language models stack up to our own language processing and generation (models and brains both excel at next-word prediction), whether language evolved in humans for complex thoughts or for communication (communication, says Ev), whether language models grasp the meaning of the text they produce (Emily says no), and much more.





Evelina Fedorenko is a cognitive scientist who runs the EvLab at MIT. She studies the neural basis of language. Her lab has amassed a large amount of data suggesting language did not evolve to help us think complex thoughts, as Noam Chomsky has argued, but rather for efficient communication. She has also recently been comparing the activity in language models to activity in our brain's language network, finding commonality in the ability to predict upcoming words.





Emily M. Bender is a computational linguist at University of Washington. Recently she has been considering questions about whether language models understand the meaning of the language they produce (no), whether we should be scaling language models as is the current practice (not really), how linguistics can inform language models, and more.



EvLab.Emily's website.Twitter: @ev_fedorenko; @emilymbender.Related papersLanguage and thought are not the same thing: Evidence from neuroimaging and neurological patients. (Fedorenko)The neural architecture of language: Integrative modeling converges on predictive processing. (Fedorenko)On the Dangers of Stochastic Parrots: Can Language Models Be Too Big? (Bender)Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data (Bender)



0:00 - Intro
4:35 - Language and cognition
15:38 - Grasping for meaning
21:32 - Are large language models producing language?
23:09 - Next-word prediction in brains and models
32:09 - Interface between language and thought
35:18 - Studying language in nonhuman animals
41:54 - Do we understand language enough?
45:51 - What do language models need?
51:45 - Are LLMs teaching us about language?
54:56 - Is meaning necessary, and does it matter how we learn language?
1:00:04 - Is our biology important for language?
1:04:59 - Future outlook]]>
                </itunes:summary>
                                                                            <itunes:duration>01:11:41</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 143 Rodolphe Sepulchre: Mixed Feedback Control]]>
                </title>
                <pubDate>Fri, 05 Aug 2022 23:15:10 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-143-rodolphe-sepulchre-mixed-feedback-control</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-143-rodolphe-sepulchre-mixed-feedback-control</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Rodolphe Sepulchre is a control engineer and theorist at Cambridge University. He focuses on applying feedback control engineering principles to build circuits that model neurons and neuronal circuits. We discuss his work on mixed feedback control - positive and negative - as an underlying principle of the mixed digital and analog brain signals, the role of neuromodulation as a controller, applying these principles to Eve Marder's lobster/crab neural circuits, building mixed-feedback neuromorphics, some feedback control history, and how "If you wish to contribute original work, be prepared to face loneliness," among other topics.</p>



<ul><li><a href="https://sites.google.com/site/rsepulchre/home">Rodolphe's website</a>.</li><li>Related papers<ul><li><a href="https://arxiv.org/abs/2112.03565">Spiking Control Systems</a>.</li><li><a href="https://www.annualreviews.org/doi/full/10.1146/annurev-control-053018-023708">Control Across Scales by Positive and Negative Feedback</a>.</li><li><a href="https://www.google.com/url?q=https%3A%2F%2Fwww.dropbox.com%2Fs%2Fljouwdgrs06sx8e%2FNeuromorphic_Control_Designing_Multiscale_Mixed-Feedback_Systems.pdf%3Fdl%3D0&amp;sa=D&amp;sntz=1&amp;usg=AOvVaw0WM3AsKkZ9LeYw9xArCJy-">Neuromorphic control</a>. (<a href="http://www.google.com/url?q=http%3A%2F%2Farxiv.org%2Fabs%2F2011.04441&amp;sa=D&amp;sntz=1&amp;usg=AOvVaw2dfjmxFAZvv6glFuTGm3IG">arXiv version</a>)</li></ul></li><li>Related episodes:<ul><li><a href="https://braininspired.co/podcast/130/">BI 130 Eve Marder: Modulation of Networks</a></li><li><a href="https://braininspired.co/podcast/119/">BI 119 Henry Yin: The Crisis in Neuroscience</a></li></ul></li></ul>



<p>0:00 - Intro
4:38 - Control engineer
9:52 - Control vs. dynamical systems
13:34 - Building vs. understanding
17:38 - Mixed feedback signals
26:00 - Robustness
28:28 - Eve Marder
32:00 - Loneliness
37:35 - Across levels
44:04 - Neuromorphics and neuromodulation
52:15 - Barrier to adopting neuromorphics
54:40 - Deep learning influence
58:04 - Beyond energy efficiency
1:02:02 - Deep learning for neuro
1:14:15 - Role of philosophy
1:16:43 - Doing it right</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Rodolphe Sepulchre is a control engineer and theorist at Cambridge University. He focuses on applying feedback control engineering principles to build circuits that model neurons and neuronal circuits. We discuss his work on mixed feedback control - positive and negative - as an underlying principle of the mixed digital and analog brain signals, the role of neuromodulation as a controller, applying these principles to Eve Marder's lobster/crab neural circuits, building mixed-feedback neuromorphics, some feedback control history, and how "If you wish to contribute original work, be prepared to face loneliness," among other topics.



Rodolphe's website.Related papersSpiking Control Systems.Control Across Scales by Positive and Negative Feedback.Neuromorphic control. (arXiv version)Related episodes:BI 130 Eve Marder: Modulation of NetworksBI 119 Henry Yin: The Crisis in Neuroscience



0:00 - Intro
4:38 - Control engineer
9:52 - Control vs. dynamical systems
13:34 - Building vs. understanding
17:38 - Mixed feedback signals
26:00 - Robustness
28:28 - Eve Marder
32:00 - Loneliness
37:35 - Across levels
44:04 - Neuromorphics and neuromodulation
52:15 - Barrier to adopting neuromorphics
54:40 - Deep learning influence
58:04 - Beyond energy efficiency
1:02:02 - Deep learning for neuro
1:14:15 - Role of philosophy
1:16:43 - Doing it right]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 143 Rodolphe Sepulchre: Mixed Feedback Control]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Rodolphe Sepulchre is a control engineer and theorist at Cambridge University. He focuses on applying feedback control engineering principles to build circuits that model neurons and neuronal circuits. We discuss his work on mixed feedback control - positive and negative - as an underlying principle of the mixed digital and analog brain signals, the role of neuromodulation as a controller, applying these principles to Eve Marder's lobster/crab neural circuits, building mixed-feedback neuromorphics, some feedback control history, and how "If you wish to contribute original work, be prepared to face loneliness," among other topics.</p>



<ul><li><a href="https://sites.google.com/site/rsepulchre/home">Rodolphe's website</a>.</li><li>Related papers<ul><li><a href="https://arxiv.org/abs/2112.03565">Spiking Control Systems</a>.</li><li><a href="https://www.annualreviews.org/doi/full/10.1146/annurev-control-053018-023708">Control Across Scales by Positive and Negative Feedback</a>.</li><li><a href="https://www.google.com/url?q=https%3A%2F%2Fwww.dropbox.com%2Fs%2Fljouwdgrs06sx8e%2FNeuromorphic_Control_Designing_Multiscale_Mixed-Feedback_Systems.pdf%3Fdl%3D0&amp;sa=D&amp;sntz=1&amp;usg=AOvVaw0WM3AsKkZ9LeYw9xArCJy-">Neuromorphic control</a>. (<a href="http://www.google.com/url?q=http%3A%2F%2Farxiv.org%2Fabs%2F2011.04441&amp;sa=D&amp;sntz=1&amp;usg=AOvVaw2dfjmxFAZvv6glFuTGm3IG">arXiv version</a>)</li></ul></li><li>Related episodes:<ul><li><a href="https://braininspired.co/podcast/130/">BI 130 Eve Marder: Modulation of Networks</a></li><li><a href="https://braininspired.co/podcast/119/">BI 119 Henry Yin: The Crisis in Neuroscience</a></li></ul></li></ul>



<p>0:00 - Intro
4:38 - Control engineer
9:52 - Control vs. dynamical systems
13:34 - Building vs. understanding
17:38 - Mixed feedback signals
26:00 - Robustness
28:28 - Eve Marder
32:00 - Loneliness
37:35 - Across levels
44:04 - Neuromorphics and neuromodulation
52:15 - Barrier to adopting neuromorphics
54:40 - Deep learning influence
58:04 - Beyond energy efficiency
1:02:02 - Deep learning for neuro
1:14:15 - Role of philosophy
1:16:43 - Doing it right</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/54e830c5-e876-4d27-a6f7-41b283a43211-143-Rodolphe-Sepulchre.mp3" length="81786561"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Rodolphe Sepulchre is a control engineer and theorist at Cambridge University. He focuses on applying feedback control engineering principles to build circuits that model neurons and neuronal circuits. We discuss his work on mixed feedback control - positive and negative - as an underlying principle of the mixed digital and analog brain signals, the role of neuromodulation as a controller, applying these principles to Eve Marder's lobster/crab neural circuits, building mixed-feedback neuromorphics, some feedback control history, and how "If you wish to contribute original work, be prepared to face loneliness," among other topics.



Rodolphe's website.Related papersSpiking Control Systems.Control Across Scales by Positive and Negative Feedback.Neuromorphic control. (arXiv version)Related episodes:BI 130 Eve Marder: Modulation of NetworksBI 119 Henry Yin: The Crisis in Neuroscience



0:00 - Intro
4:38 - Control engineer
9:52 - Control vs. dynamical systems
13:34 - Building vs. understanding
17:38 - Mixed feedback signals
26:00 - Robustness
28:28 - Eve Marder
32:00 - Loneliness
37:35 - Across levels
44:04 - Neuromorphics and neuromodulation
52:15 - Barrier to adopting neuromorphics
54:40 - Deep learning influence
58:04 - Beyond energy efficiency
1:02:02 - Deep learning for neuro
1:14:15 - Role of philosophy
1:16:43 - Doing it right]]>
                </itunes:summary>
                                                                            <itunes:duration>01:24:53</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 142 Cameron Buckner: The New DoGMA]]>
                </title>
                <pubDate>Tue, 26 Jul 2022 17:54:31 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-142-cameron-buckner-the-new-dogma</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-142-cameron-buckner-the-new-dogma</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Cameron Buckner is a philosopher and cognitive scientist at The University of Houston. He is writing a book about the age-old philosophical debate on how much of our knowledge is innate (nature, rationalism) versus how much is learned (nurture, empiricism). In the book and his other works, Cameron argues that modern AI can help settle the debate. In particular, he suggests we focus on what types of psychological "domain-general faculties" underlie our own intelligence, and how different kinds of deep learning models are revealing how those faculties may be implemented in our brains. The hope is that by building systems that possess the right handful of faculties, and putting those systems together in a way they can cooperate in a general and flexible manner, it will result in cognitive architectures we would call intelligent. Thus, what Cameron calls The New DoGMA: Domain-General Modular Architecture. We also discuss his work on mental representation and how representations get their content - how our thoughts connect to the natural external world. </p>



<ul><li><a href="http://cameronbuckner.net/professional/index.htm">Cameron's Website</a>.</li><li>Twitter: <a href="https://twitter.com/cameronjbuckner">@cameronjbuckner</a>.</li><li>Related papers<ul><li><a href="http://cameronbuckner.net/professional/deeplearning.pdf">Empiricism without Magic: Transformational Abstraction in Deep Convolutional Neural Networks</a>.</li><li><a href="http://cameronbuckner.net/professional/forwardlooking.pdf">A Forward-Looking Theory of Content</a>.</li></ul></li><li>Other sources Cameron mentions:<ul><li><a href="https://arxiv.org/abs/1801.05667">Innateness, AlphaZero, and Artificial Intelligence (Gary Marcus)</a>.</li><li><a href="http://causality.cs.ucla.edu/blog/index.php/2020/07/26/radical-empiricism-and-machine-learning-research/">Radical Empiricism and Machine Learning Research (Judea Pearl)</a>.</li><li><a href="https://link.springer.com/article/10.1007/s11229-021-03028-4">Fodor’s guide to the Humean mind (Tamás Demeter)</a>.</li></ul></li></ul>



<p>0:00 - Intro
4:55 - Interpreting old philosophy
8:26 - AI and philosophy
17:00 - Empiricism vs. rationalism
27:09 - Domain-general faculties
33:10 - Faculty psychology
40:28 - New faculties?
46:11 - Human faculties
51:15 - Cognitive architectures
56:26 - Language
1:01:40 - Beyond dichotomous thinking
1:04:08 - Lower-level faculties
1:10:16 - Animal cognition
1:14:31 - A Forward-Looking Theory of Content</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Cameron Buckner is a philosopher and cognitive scientist at The University of Houston. He is writing a book about the age-old philosophical debate on how much of our knowledge is innate (nature, rationalism) versus how much is learned (nurture, empiricism). In the book and his other works, Cameron argues that modern AI can help settle the debate. In particular, he suggests we focus on what types of psychological "domain-general faculties" underlie our own intelligence, and how different kinds of deep learning models are revealing how those faculties may be implemented in our brains. The hope is that by building systems that possess the right handful of faculties, and putting those systems together in a way they can cooperate in a general and flexible manner, it will result in cognitive architectures we would call intelligent. Thus, what Cameron calls The New DoGMA: Domain-General Modular Architecture. We also discuss his work on mental representation and how representations get their content - how our thoughts connect to the natural external world. 



Cameron's Website.Twitter: @cameronjbuckner.Related papersEmpiricism without Magic: Transformational Abstraction in Deep Convolutional Neural Networks.A Forward-Looking Theory of Content.Other sources Cameron mentions:Innateness, AlphaZero, and Artificial Intelligence (Gary Marcus).Radical Empiricism and Machine Learning Research (Judea Pearl).Fodor’s guide to the Humean mind (Tamás Demeter).



0:00 - Intro
4:55 - Interpreting old philosophy
8:26 - AI and philosophy
17:00 - Empiricism vs. rationalism
27:09 - Domain-general faculties
33:10 - Faculty psychology
40:28 - New faculties?
46:11 - Human faculties
51:15 - Cognitive architectures
56:26 - Language
1:01:40 - Beyond dichotomous thinking
1:04:08 - Lower-level faculties
1:10:16 - Animal cognition
1:14:31 - A Forward-Looking Theory of Content]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 142 Cameron Buckner: The New DoGMA]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>






<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>






<p>Cameron Buckner is a philosopher and cognitive scientist at The University of Houston. He is writing a book about the age-old philosophical debate on how much of our knowledge is innate (nature, rationalism) versus how much is learned (nurture, empiricism). In the book and his other works, Cameron argues that modern AI can help settle the debate. In particular, he suggests we focus on what types of psychological "domain-general faculties" underlie our own intelligence, and how different kinds of deep learning models are revealing how those faculties may be implemented in our brains. The hope is that by building systems that possess the right handful of faculties, and putting those systems together in a way they can cooperate in a general and flexible manner, it will result in cognitive architectures we would call intelligent. Thus, what Cameron calls The New DoGMA: Domain-General Modular Architecture. We also discuss his work on mental representation and how representations get their content - how our thoughts connect to the natural external world. </p>



<ul><li><a href="http://cameronbuckner.net/professional/index.htm">Cameron's Website</a>.</li><li>Twitter: <a href="https://twitter.com/cameronjbuckner">@cameronjbuckner</a>.</li><li>Related papers<ul><li><a href="http://cameronbuckner.net/professional/deeplearning.pdf">Empiricism without Magic: Transformational Abstraction in Deep Convolutional Neural Networks</a>.</li><li><a href="http://cameronbuckner.net/professional/forwardlooking.pdf">A Forward-Looking Theory of Content</a>.</li></ul></li><li>Other sources Cameron mentions:<ul><li><a href="https://arxiv.org/abs/1801.05667">Innateness, AlphaZero, and Artificial Intelligence (Gary Marcus)</a>.</li><li><a href="http://causality.cs.ucla.edu/blog/index.php/2020/07/26/radical-empiricism-and-machine-learning-research/">Radical Empiricism and Machine Learning Research (Judea Pearl)</a>.</li><li><a href="https://link.springer.com/article/10.1007/s11229-021-03028-4">Fodor’s guide to the Humean mind (Tamás Demeter)</a>.</li></ul></li></ul>



<p>0:00 - Intro
4:55 - Interpreting old philosophy
8:26 - AI and philosophy
17:00 - Empiricism vs. rationalism
27:09 - Domain-general faculties
33:10 - Faculty psychology
40:28 - New faculties?
46:11 - Human faculties
51:15 - Cognitive architectures
56:26 - Language
1:01:40 - Beyond dichotomous thinking
1:04:08 - Lower-level faculties
1:10:16 - Animal cognition
1:14:31 - A Forward-Looking Theory of Content</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/3058a975-a5b0-4cef-b37a-c7114a383351-142-Cameron-Buckner-public.mp3" length="99436595"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience






Support the show to get full episodes and join the Discord community.










Cameron Buckner is a philosopher and cognitive scientist at The University of Houston. He is writing a book about the age-old philosophical debate on how much of our knowledge is innate (nature, rationalism) versus how much is learned (nurture, empiricism). In the book and his other works, Cameron argues that modern AI can help settle the debate. In particular, he suggests we focus on what types of psychological "domain-general faculties" underlie our own intelligence, and how different kinds of deep learning models are revealing how those faculties may be implemented in our brains. The hope is that by building systems that possess the right handful of faculties, and putting those systems together in a way they can cooperate in a general and flexible manner, it will result in cognitive architectures we would call intelligent. Thus, what Cameron calls The New DoGMA: Domain-General Modular Architecture. We also discuss his work on mental representation and how representations get their content - how our thoughts connect to the natural external world. 



Cameron's Website.Twitter: @cameronjbuckner.Related papersEmpiricism without Magic: Transformational Abstraction in Deep Convolutional Neural Networks.A Forward-Looking Theory of Content.Other sources Cameron mentions:Innateness, AlphaZero, and Artificial Intelligence (Gary Marcus).Radical Empiricism and Machine Learning Research (Judea Pearl).Fodor’s guide to the Humean mind (Tamás Demeter).



0:00 - Intro
4:55 - Interpreting old philosophy
8:26 - AI and philosophy
17:00 - Empiricism vs. rationalism
27:09 - Domain-general faculties
33:10 - Faculty psychology
40:28 - New faculties?
46:11 - Human faculties
51:15 - Cognitive architectures
56:26 - Language
1:01:40 - Beyond dichotomous thinking
1:04:08 - Lower-level faculties
1:10:16 - Animal cognition
1:14:31 - A Forward-Looking Theory of Content]]>
                </itunes:summary>
                                                                            <itunes:duration>01:43:16</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 141 Carina Curto: From Structure to Dynamics]]>
                </title>
                <pubDate>Tue, 12 Jul 2022 19:42:41 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-141-carina-curto-from-structure-to-dynamics</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-141-carina-curto-from-structure-to-dynamics</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/open/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Carina Curto is a professor in the Department of Mathematics at The Pennsylvania State University. She uses her background skills in mathematical physics/string theory to study networks of neurons. On this episode, we discuss the world of topology in neuroscience - the study of the geometrical structures mapped out by active populations of neurons. We also discuss her work on "combinatorial linear threshold networks" (CLTNs). Unlike the large deep learning models popular today as models of brain activity, the CLTNs Carina builds are relatively simple, abstracted graphical models. This property is important to Carina, whose goal is to develop mathematically tractable neural network models. Carina has worked out how the structure of many CLTNs allows prediction of the model's allowable dynamics, how motifs of model structure can be embedded in larger models while retaining their dynamical features, and more. The hope is that these elegant models can tell us more about the principles our messy brains employ to generate the robust and beautiful dynamics underlying our cognition.</p>



<ul><li><a href="https://www.personal.psu.edu/cpc16/">Carina's website</a>.</li><li><a href="https://sites.psu.edu/mathneurolab/">The Mathematical Neuroscience Lab</a>.</li><li>Related papers<ul><li><a href="https://www.personal.psu.edu/cpc16/Curto-whitepaper-2013.pdf">A major obstacle impeding progress in brain science is the lack of beautiful models.</a></li><li><a href="https://arxiv.org/abs/1605.01905">What can topology tell us about the neural code?</a></li><li><a href="https://arxiv.org/abs/1804.01487">Predicting neural network dynamics via graphical analysis</a></li></ul></li></ul>



<p>0:00 - Intro
4:25 - Background: Physics and math to study brains
20:45 - Beautiful and ugly models
35:40 - Topology
43:14 - Topology in hippocampal navigation
56:04 - Topology vs. dynamical systems theory
59:10 - Combinatorial linear threshold networks
1:25:26 - How much more math do we need to invent?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.









Carina Curto is a professor in the Department of Mathematics at The Pennsylvania State University. She uses her background skills in mathematical physics/string theory to study networks of neurons. On this episode, we discuss the world of topology in neuroscience - the study of the geometrical structures mapped out by active populations of neurons. We also discuss her work on "combinatorial linear threshold networks" (CLTNs). Unlike the large deep learning models popular today as models of brain activity, the CLTNs Carina builds are relatively simple, abstracted graphical models. This property is important to Carina, whose goal is to develop mathematically tractable neural network models. Carina has worked out how the structure of many CLTNs allows prediction of the model's allowable dynamics, how motifs of model structure can be embedded in larger models while retaining their dynamical features, and more. The hope is that these elegant models can tell us more about the principles our messy brains employ to generate the robust and beautiful dynamics underlying our cognition.



Carina's website.The Mathematical Neuroscience Lab.Related papersA major obstacle impeding progress in brain science is the lack of beautiful models.What can topology tell us about the neural code?Predicting neural network dynamics via graphical analysis



0:00 - Intro
4:25 - Background: Physics and math to study brains
20:45 - Beautiful and ugly models
35:40 - Topology
43:14 - Topology in hippocampal navigation
56:04 - Topology vs. dynamical systems theory
59:10 - Combinatorial linear threshold networks
1:25:26 - How much more math do we need to invent?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 141 Carina Curto: From Structure to Dynamics]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/open/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Carina Curto is a professor in the Department of Mathematics at The Pennsylvania State University. She uses her background skills in mathematical physics/string theory to study networks of neurons. On this episode, we discuss the world of topology in neuroscience - the study of the geometrical structures mapped out by active populations of neurons. We also discuss her work on "combinatorial linear threshold networks" (CLTNs). Unlike the large deep learning models popular today as models of brain activity, the CLTNs Carina builds are relatively simple, abstracted graphical models. This property is important to Carina, whose goal is to develop mathematically tractable neural network models. Carina has worked out how the structure of many CLTNs allows prediction of the model's allowable dynamics, how motifs of model structure can be embedded in larger models while retaining their dynamical features, and more. The hope is that these elegant models can tell us more about the principles our messy brains employ to generate the robust and beautiful dynamics underlying our cognition.</p>



<ul><li><a href="https://www.personal.psu.edu/cpc16/">Carina's website</a>.</li><li><a href="https://sites.psu.edu/mathneurolab/">The Mathematical Neuroscience Lab</a>.</li><li>Related papers<ul><li><a href="https://www.personal.psu.edu/cpc16/Curto-whitepaper-2013.pdf">A major obstacle impeding progress in brain science is the lack of beautiful models.</a></li><li><a href="https://arxiv.org/abs/1605.01905">What can topology tell us about the neural code?</a></li><li><a href="https://arxiv.org/abs/1804.01487">Predicting neural network dynamics via graphical analysis</a></li></ul></li></ul>



<p>0:00 - Intro
4:25 - Background: Physics and math to study brains
20:45 - Beautiful and ugly models
35:40 - Topology
43:14 - Topology in hippocampal navigation
56:04 - Topology vs. dynamical systems theory
59:10 - Combinatorial linear threshold networks
1:25:26 - How much more math do we need to invent?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/34010b0e-bfff-4039-bfd8-5cb4da6ef953-141-Carina-Curto-public.mp3" length="88309733"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.









Carina Curto is a professor in the Department of Mathematics at The Pennsylvania State University. She uses her background skills in mathematical physics/string theory to study networks of neurons. On this episode, we discuss the world of topology in neuroscience - the study of the geometrical structures mapped out by active populations of neurons. We also discuss her work on "combinatorial linear threshold networks" (CLTNs). Unlike the large deep learning models popular today as models of brain activity, the CLTNs Carina builds are relatively simple, abstracted graphical models. This property is important to Carina, whose goal is to develop mathematically tractable neural network models. Carina has worked out how the structure of many CLTNs allows prediction of the model's allowable dynamics, how motifs of model structure can be embedded in larger models while retaining their dynamical features, and more. The hope is that these elegant models can tell us more about the principles our messy brains employ to generate the robust and beautiful dynamics underlying our cognition.



Carina's website.The Mathematical Neuroscience Lab.Related papersA major obstacle impeding progress in brain science is the lack of beautiful models.What can topology tell us about the neural code?Predicting neural network dynamics via graphical analysis



0:00 - Intro
4:25 - Background: Physics and math to study brains
20:45 - Beautiful and ugly models
35:40 - Topology
43:14 - Topology in hippocampal navigation
56:04 - Topology vs. dynamical systems theory
59:10 - Combinatorial linear threshold networks
1:25:26 - How much more math do we need to invent?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:31:40</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 140 Jeff Schall: Decisions and Eye Movements]]>
                </title>
                <pubDate>Thu, 30 Jun 2022 22:37:51 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-140-jeff-schall-decisions-and-eye-movements</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-140-jeff-schall-decisions-and-eye-movements</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/open/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Jeff Schall is the director of the Center for Visual Neurophysiology at York University, where he runs the <a href="http://www.psy.vanderbilt.edu/faculty/schall/">Schall Lab</a>. His research centers around studying the mechanisms of our decisions, choices, movement control, and attention within the saccadic eye movement brain systems and in mathematical psychology models - in other words, how we decide where and when to look. Jeff was my postdoctoral advisor at Vanderbilt University, and I wanted to revisit a few guiding principles he instills in all his students. <a href="https://pubmed.ncbi.nlm.nih.gov/6395480/">Linking Propositions</a> by Davida Teller are a series of logical statements to ensure we rigorously connect the brain activity we record to the psychological functions we want to explain. <a href="https://bio.research.ucsc.edu/~barrylab/classes/bio183w/PlattSci1964_Strong_Inference.pdf">Strong Inference</a> by John Platt is the scientific method on steroids - a way to make our scientific practice most productive and efficient. We discuss both of these topics in the context of Jeff's eye movement and decision-making science. We also discuss how neurophysiology has changed over the past 30 years, we compare the relatively small models he employs with the huge deep learning models, many of his current projects, and plenty more. If you want to learn more about Jeff's work and approach, I recommend reading in order two of his review papers we discuss as well. One was written 20 years ago (<a href="http://www.psy.vanderbilt.edu/courses/hon182/Schall_Ann_Rev_Psych_2004.pdf">On Building a Bridge Between Brain and Behavior</a>), and the other 2-ish years ago (<a href="http://www.psy.vanderbilt.edu/faculty/schall/pdfs/Schall_TINS_2019.pdf">Accumulators, Neurons, and Response Time</a>).</p>



<ul><li><a href="http://www.psy.vanderbilt.edu/faculty/schall/">Schall Lab</a>.</li><li>Twitter: <a href="https://twitter.com/LabSchall">@LabSchall</a>.</li><li>Related papers<ul><li><a href="https://pubmed.ncbi.nlm.nih.gov/6395480/">Linking Propositions</a>.</li><li><a href="https://bio.research.ucsc.edu/~barrylab/classes/bio183w/PlattSci1964_Strong_Inference.pdf">Strong Inference</a>.</li><li><a href="http://www.psy.vanderbilt.edu/courses/hon182/Schall_Ann_Rev_Psych_2004.pdf">On Building a Bridge Between Brain and Behavior</a>.</li><li><a href="http://www.psy.vanderbilt.edu/faculty/schall/pdfs/Schall_TINS_2019.pdf">Accumulators, Neurons, and Response Time</a>.</li></ul></li></ul>



<p>0:00 - Intro
6:51 - Neurophysiology old and new
14:50 - Linking propositions
24:18 - Psychology working with neurophysiology
35:40 - Neuron doctrine, population doctrine
40:28 - Strong Inference and deep learning
46:37 - Model mimicry
51:56 - Scientific fads
57:07 - Current projects
1:06:38 - On leaving academia
1:13:51 - How academia has changed for better and worse</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.









Jeff Schall is the director of the Center for Visual Neurophysiology at York University, where he runs the Schall Lab. His research centers around studying the mechanisms of our decisions, choices, movement control, and attention within the saccadic eye movement brain systems and in mathematical psychology models - in other words, how we decide where and when to look. Jeff was my postdoctoral advisor at Vanderbilt University, and I wanted to revisit a few guiding principles he instills in all his students. Linking Propositions by Davida Teller are a series of logical statements to ensure we rigorously connect the brain activity we record to the psychological functions we want to explain. Strong Inference by John Platt is the scientific method on steroids - a way to make our scientific practice most productive and efficient. We discuss both of these topics in the context of Jeff's eye movement and decision-making science. We also discuss how neurophysiology has changed over the past 30 years, we compare the relatively small models he employs with the huge deep learning models, many of his current projects, and plenty more. If you want to learn more about Jeff's work and approach, I recommend reading in order two of his review papers we discuss as well. One was written 20 years ago (On Building a Bridge Between Brain and Behavior), and the other 2-ish years ago (Accumulators, Neurons, and Response Time).



Schall Lab.Twitter: @LabSchall.Related papersLinking Propositions.Strong Inference.On Building a Bridge Between Brain and Behavior.Accumulators, Neurons, and Response Time.



0:00 - Intro
6:51 - Neurophysiology old and new
14:50 - Linking propositions
24:18 - Psychology working with neurophysiology
35:40 - Neuron doctrine, population doctrine
40:28 - Strong Inference and deep learning
46:37 - Model mimicry
51:56 - Scientific fads
57:07 - Current projects
1:06:38 - On leaving academia
1:13:51 - How academia has changed for better and worse]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 140 Jeff Schall: Decisions and Eye Movements]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/open/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Jeff Schall is the director of the Center for Visual Neurophysiology at York University, where he runs the <a href="http://www.psy.vanderbilt.edu/faculty/schall/">Schall Lab</a>. His research centers around studying the mechanisms of our decisions, choices, movement control, and attention within the saccadic eye movement brain systems and in mathematical psychology models - in other words, how we decide where and when to look. Jeff was my postdoctoral advisor at Vanderbilt University, and I wanted to revisit a few guiding principles he instills in all his students. <a href="https://pubmed.ncbi.nlm.nih.gov/6395480/">Linking Propositions</a> by Davida Teller are a series of logical statements to ensure we rigorously connect the brain activity we record to the psychological functions we want to explain. <a href="https://bio.research.ucsc.edu/~barrylab/classes/bio183w/PlattSci1964_Strong_Inference.pdf">Strong Inference</a> by John Platt is the scientific method on steroids - a way to make our scientific practice most productive and efficient. We discuss both of these topics in the context of Jeff's eye movement and decision-making science. We also discuss how neurophysiology has changed over the past 30 years, we compare the relatively small models he employs with the huge deep learning models, many of his current projects, and plenty more. If you want to learn more about Jeff's work and approach, I recommend reading in order two of his review papers we discuss as well. One was written 20 years ago (<a href="http://www.psy.vanderbilt.edu/courses/hon182/Schall_Ann_Rev_Psych_2004.pdf">On Building a Bridge Between Brain and Behavior</a>), and the other 2-ish years ago (<a href="http://www.psy.vanderbilt.edu/faculty/schall/pdfs/Schall_TINS_2019.pdf">Accumulators, Neurons, and Response Time</a>).</p>



<ul><li><a href="http://www.psy.vanderbilt.edu/faculty/schall/">Schall Lab</a>.</li><li>Twitter: <a href="https://twitter.com/LabSchall">@LabSchall</a>.</li><li>Related papers<ul><li><a href="https://pubmed.ncbi.nlm.nih.gov/6395480/">Linking Propositions</a>.</li><li><a href="https://bio.research.ucsc.edu/~barrylab/classes/bio183w/PlattSci1964_Strong_Inference.pdf">Strong Inference</a>.</li><li><a href="http://www.psy.vanderbilt.edu/courses/hon182/Schall_Ann_Rev_Psych_2004.pdf">On Building a Bridge Between Brain and Behavior</a>.</li><li><a href="http://www.psy.vanderbilt.edu/faculty/schall/pdfs/Schall_TINS_2019.pdf">Accumulators, Neurons, and Response Time</a>.</li></ul></li></ul>



<p>0:00 - Intro
6:51 - Neurophysiology old and new
14:50 - Linking propositions
24:18 - Psychology working with neurophysiology
35:40 - Neuron doctrine, population doctrine
40:28 - Strong Inference and deep learning
46:37 - Model mimicry
51:56 - Scientific fads
57:07 - Current projects
1:06:38 - On leaving academia
1:13:51 - How academia has changed for better and worse</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/925cbb60-2b51-499d-a25a-6026e52c9bf3-140-Jeff-Schall-public.mp3" length="77447615"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.









Jeff Schall is the director of the Center for Visual Neurophysiology at York University, where he runs the Schall Lab. His research centers around studying the mechanisms of our decisions, choices, movement control, and attention within the saccadic eye movement brain systems and in mathematical psychology models - in other words, how we decide where and when to look. Jeff was my postdoctoral advisor at Vanderbilt University, and I wanted to revisit a few guiding principles he instills in all his students. Linking Propositions by Davida Teller are a series of logical statements to ensure we rigorously connect the brain activity we record to the psychological functions we want to explain. Strong Inference by John Platt is the scientific method on steroids - a way to make our scientific practice most productive and efficient. We discuss both of these topics in the context of Jeff's eye movement and decision-making science. We also discuss how neurophysiology has changed over the past 30 years, we compare the relatively small models he employs with the huge deep learning models, many of his current projects, and plenty more. If you want to learn more about Jeff's work and approach, I recommend reading in order two of his review papers we discuss as well. One was written 20 years ago (On Building a Bridge Between Brain and Behavior), and the other 2-ish years ago (Accumulators, Neurons, and Response Time).



Schall Lab.Twitter: @LabSchall.Related papersLinking Propositions.Strong Inference.On Building a Bridge Between Brain and Behavior.Accumulators, Neurons, and Response Time.



0:00 - Intro
6:51 - Neurophysiology old and new
14:50 - Linking propositions
24:18 - Psychology working with neurophysiology
35:40 - Neuron doctrine, population doctrine
40:28 - Strong Inference and deep learning
46:37 - Model mimicry
51:56 - Scientific fads
57:07 - Current projects
1:06:38 - On leaving academia
1:13:51 - How academia has changed for better and worse]]>
                </itunes:summary>
                                                                            <itunes:duration>01:20:22</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 139 Marc Howard: Compressed Time and Memory]]>
                </title>
                <pubDate>Mon, 20 Jun 2022 16:49:31 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-139-marc-howard-compressed-time-and-memory</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-139-marc-howard-compressed-time-and-memory</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p>Marc Howard runs his <a href="https://sites.bu.edu/tcn/">Theoretical Cognitive Neuroscience Lab</a> at Boston University, where he develops mathematical models of cognition, constrained by psychological and neural data. In this episode, we discuss the idea that a <a href="https://en.wikipedia.org/wiki/Laplace_transform">Laplace transform</a> and its inverse may serve as a unified framework for memory. In short, our memories are compressed on a continuous log-scale: as memories get older, their representations "spread out" in time. It turns out this kind of representation seems ubiquitous in the brain and across cognitive functions, suggesting it is likely a canonical computation our brains use to represent a wide variety of cognitive functions. We also discuss some of the ways Marc is incorporating this mathematical operation in deep learning nets to improve their ability to handle information at different time scales.</p>



<ul>
<li><a href="https://sites.bu.edu/tcn/">Theoretical Cognitive Neuroscience Lab</a>. </li>



<li>Twitter: <a href="https://twitter.com/marcwhoward777">@marcwhoward777</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://sites.bu.edu/tcn/files/2017/06/TiCSVision.pdf">Memory as perception of the past: Compressed time in mind and brain.</a></li>



<li><a href="http://arxiv.org/abs/2201.01796">Formal models of memory based on temporally-varying representations.</a></li>



<li><a href="http://arxiv.org/abs/2003.11668">Cognitive computation using neural representations of time and space in the Laplace domain.</a></li>



<li><a href="https://www.youtube.com/watch?v=DRXcK0iTPUc&amp;t=731s">Time as a continuous dimension in natural and artificial networks</a>.</li>



<li><a href="https://arxiv.org/abs/2104.04646">DeepSITH: Efficient learning via decomposition of what and when across time scales.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:57 - Main idea: Laplace transforms
12:00 - Time cells
20:08 - Laplace, compression, and time cells
25:34 - Everywhere in the brain
29:28 - Episodic memory
35:11 - Randy Gallistel's memory idea
40:37 - Adding Laplace to deep nets
48:04 - Reinforcement learning
1:00:52 - Brad Wyble Q: What gets filtered out?
1:05:38 - Replay and complementary learning systems
1:11:52 - Howard Goldowsky Q: Gyorgy Buzsaki
1:15:10 - Obstacles</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.







Marc Howard runs his Theoretical Cognitive Neuroscience Lab at Boston University, where he develops mathematical models of cognition, constrained by psychological and neural data. In this episode, we discuss the idea that a Laplace transform and its inverse may serve as a unified framework for memory. In short, our memories are compressed on a continuous log-scale: as memories get older, their representations "spread out" in time. It turns out this kind of representation seems ubiquitous in the brain and across cognitive functions, suggesting it is likely a canonical computation our brains use to represent a wide variety of cognitive functions. We also discuss some of the ways Marc is incorporating this mathematical operation in deep learning nets to improve their ability to handle information at different time scales.




Theoretical Cognitive Neuroscience Lab. 



Twitter: @marcwhoward777.



Related papers:

Memory as perception of the past: Compressed time in mind and brain.



Formal models of memory based on temporally-varying representations.



Cognitive computation using neural representations of time and space in the Laplace domain.



Time as a continuous dimension in natural and artificial networks.



DeepSITH: Efficient learning via decomposition of what and when across time scales.






0:00 - Intro
4:57 - Main idea: Laplace transforms
12:00 - Time cells
20:08 - Laplace, compression, and time cells
25:34 - Everywhere in the brain
29:28 - Episodic memory
35:11 - Randy Gallistel's memory idea
40:37 - Adding Laplace to deep nets
48:04 - Reinforcement learning
1:00:52 - Brad Wyble Q: What gets filtered out?
1:05:38 - Replay and complementary learning systems
1:11:52 - Howard Goldowsky Q: Gyorgy Buzsaki
1:15:10 - Obstacles]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 139 Marc Howard: Compressed Time and Memory]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener">Check out my free video series about what's missing in AI and Neuroscience</a></p>



<a href="https://braininspired.co/open/" target="_blank" rel="noreferrer noopener"></a>



<p class="has-text-align-center"><strong><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community</strong>.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p>Marc Howard runs his <a href="https://sites.bu.edu/tcn/">Theoretical Cognitive Neuroscience Lab</a> at Boston University, where he develops mathematical models of cognition, constrained by psychological and neural data. In this episode, we discuss the idea that a <a href="https://en.wikipedia.org/wiki/Laplace_transform">Laplace transform</a> and its inverse may serve as a unified framework for memory. In short, our memories are compressed on a continuous log-scale: as memories get older, their representations "spread out" in time. It turns out this kind of representation seems ubiquitous in the brain and across cognitive functions, suggesting it is likely a canonical computation our brains use to represent a wide variety of cognitive functions. We also discuss some of the ways Marc is incorporating this mathematical operation in deep learning nets to improve their ability to handle information at different time scales.</p>



<ul>
<li><a href="https://sites.bu.edu/tcn/">Theoretical Cognitive Neuroscience Lab</a>. </li>



<li>Twitter: <a href="https://twitter.com/marcwhoward777">@marcwhoward777</a>.</li>



<li>Related papers:
<ul>
<li><a href="https://sites.bu.edu/tcn/files/2017/06/TiCSVision.pdf">Memory as perception of the past: Compressed time in mind and brain.</a></li>



<li><a href="http://arxiv.org/abs/2201.01796">Formal models of memory based on temporally-varying representations.</a></li>



<li><a href="http://arxiv.org/abs/2003.11668">Cognitive computation using neural representations of time and space in the Laplace domain.</a></li>



<li><a href="https://www.youtube.com/watch?v=DRXcK0iTPUc&amp;t=731s">Time as a continuous dimension in natural and artificial networks</a>.</li>



<li><a href="https://arxiv.org/abs/2104.04646">DeepSITH: Efficient learning via decomposition of what and when across time scales.</a></li>
</ul>
</li>
</ul>



<p>0:00 - Intro
4:57 - Main idea: Laplace transforms
12:00 - Time cells
20:08 - Laplace, compression, and time cells
25:34 - Everywhere in the brain
29:28 - Episodic memory
35:11 - Randy Gallistel's memory idea
40:37 - Adding Laplace to deep nets
48:04 - Reinforcement learning
1:00:52 - Brad Wyble Q: What gets filtered out?
1:05:38 - Replay and complementary learning systems
1:11:52 - Howard Goldowsky Q: Gyorgy Buzsaki
1:15:10 - Obstacles</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ebc23047-d72b-4ea5-8ec8-400fa705ae17-139-Marc-Howard-Public.mp3" length="77276449"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my free video series about what's missing in AI and Neuroscience







Support the show to get full episodes and join the Discord community.







Marc Howard runs his Theoretical Cognitive Neuroscience Lab at Boston University, where he develops mathematical models of cognition, constrained by psychological and neural data. In this episode, we discuss the idea that a Laplace transform and its inverse may serve as a unified framework for memory. In short, our memories are compressed on a continuous log-scale: as memories get older, their representations "spread out" in time. It turns out this kind of representation seems ubiquitous in the brain and across cognitive functions, suggesting it is likely a canonical computation our brains use to represent a wide variety of cognitive functions. We also discuss some of the ways Marc is incorporating this mathematical operation in deep learning nets to improve their ability to handle information at different time scales.




Theoretical Cognitive Neuroscience Lab. 



Twitter: @marcwhoward777.



Related papers:

Memory as perception of the past: Compressed time in mind and brain.



Formal models of memory based on temporally-varying representations.



Cognitive computation using neural representations of time and space in the Laplace domain.



Time as a continuous dimension in natural and artificial networks.



DeepSITH: Efficient learning via decomposition of what and when across time scales.






0:00 - Intro
4:57 - Main idea: Laplace transforms
12:00 - Time cells
20:08 - Laplace, compression, and time cells
25:34 - Everywhere in the brain
29:28 - Episodic memory
35:11 - Randy Gallistel's memory idea
40:37 - Adding Laplace to deep nets
48:04 - Reinforcement learning
1:00:52 - Brad Wyble Q: What gets filtered out?
1:05:38 - Replay and complementary learning systems
1:11:52 - Howard Goldowsky Q: Gyorgy Buzsaki
1:15:10 - Obstacles]]>
                </itunes:summary>
                                                                            <itunes:duration>01:20:11</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 138 Matthew Larkum: The Dendrite Hypothesis]]>
                </title>
                <pubDate>Mon, 06 Jun 2022 14:58:39 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-138-matthew-larkum-the-dendrite-hypothesis</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-138-matthew-larkum-the-dendrite-hypothesis</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Matthew Larkum runs his lab at Humboldt University of Berlin, where his group studies how dendrites contribute to computations within and across layers of the neocortex. Since the late 1990s, Matthew has continued to uncover key properties of the way pyramidal neurons stretch across layers of the cortex, their dendrites receiving inputs from those different layers - and thus different brain areas. For example, layer 5 pyramidal neurons have a set of basal dendrites near the cell body that receives feedforward-like input, and a set of apical dendrites all the way up in layer 1 that receives feedback-like input. Depending on which set of dendrites is receiving input, or neither or both, the neuron's output functions in different modes - silent, regular spiking, or burst spiking. Matthew realized the different sets of dendritic inputs could signal different operations, often pairing feedforward sensory-like signals and feedback context-like signals. His research has shown this kind of coincidence detection is important for cognitive functions like perception, memory, learning, and even wakefulness. We discuss many of his ideas and research findings, why dendrites have long been neglected in favor of neuron cell bodies, the possibility of learning about computations by studying implementation-level phenomena, and much more.</p>



<ul><li><a href="https://www.projekte.hu-berlin.de/en/larkum">Larkum Lab</a>.</li><li>Twitter: <a href="https://twitter.com/mattlark">@mattlark</a>.</li><li>Related papers<ul><li><a href="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(20)30175-3?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS1364661320301753%3Fshowall%3Dtrue">Cellular Mechanisms of Conscious Processing</a>.</li><li><a href="https://pubmed.ncbi.nlm.nih.gov/33335033/">Perirhinal input to neocortical layer 1 controls learning</a>. (bioRxiv link: <a href="https://www.biorxiv.org/content/10.1101/713883v1">https://www.biorxiv.org/content/10.1101/713883v1</a>)</li><li><a href="https://doi.org/10.1016/j.neuroscience.2022.03.008">Are dendrites conceptually useful?</a></li><li><a href="https://doi.org/10.1126/science.abk1859">Memories off the top of your head</a>.</li><li><a href="https://www.researchgate.net/publication/359861041_Do_action_potentials_cause_consciousness">Do Action Potentials Cause Consciousness?</a></li></ul></li><li><a href="https://braininspired.co/podcast/9/">Blake Richard's episode</a> discussing back-propagation in the brain (based on Matthew's experiments)</li></ul>



<p>0:00 - Intro
5:31 - Background: Dendrites
23:20 - Cortical neuron bodies vs. branches
25:47 - Theories of cortex
30:49 - Feedforward and feedback hierarchy
37:40 - Dendritic integration hypothesis
44:32 - DIT vs. other consciousness theories
51:30 - Mac Shine Q1
1:04:38 - Are dendrites conceptually useful?
1:09:15 - Insights from implementation level
1:24:44 - How detailed to model?
1:28:15 - Do action potentials cause consciousness?
1:40:33 - Mac Shine Q2</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.









Matthew Larkum runs his lab at Humboldt University of Berlin, where his group studies how dendrites contribute to computations within and across layers of the neocortex. Since the late 1990s, Matthew has continued to uncover key properties of the way pyramidal neurons stretch across layers of the cortex, their dendrites receiving inputs from those different layers - and thus different brain areas. For example, layer 5 pyramidal neurons have a set of basal dendrites near the cell body that receives feedforward-like input, and a set of apical dendrites all the way up in layer 1 that receives feedback-like input. Depending on which set of dendrites is receiving input, or neither or both, the neuron's output functions in different modes - silent, regular spiking, or burst spiking. Matthew realized the different sets of dendritic inputs could signal different operations, often pairing feedforward sensory-like signals and feedback context-like signals. His research has shown this kind of coincidence detection is important for cognitive functions like perception, memory, learning, and even wakefulness. We discuss many of his ideas and research findings, why dendrites have long been neglected in favor of neuron cell bodies, the possibility of learning about computations by studying implementation-level phenomena, and much more.



Larkum Lab.Twitter: @mattlark.Related papersCellular Mechanisms of Conscious Processing.Perirhinal input to neocortical layer 1 controls learning. (bioRxiv link: https://www.biorxiv.org/content/10.1101/713883v1)Are dendrites conceptually useful?Memories off the top of your head.Do Action Potentials Cause Consciousness?Blake Richard's episode discussing back-propagation in the brain (based on Matthew's experiments)



0:00 - Intro
5:31 - Background: Dendrites
23:20 - Cortical neuron bodies vs. branches
25:47 - Theories of cortex
30:49 - Feedforward and feedback hierarchy
37:40 - Dendritic integration hypothesis
44:32 - DIT vs. other consciousness theories
51:30 - Mac Shine Q1
1:04:38 - Are dendrites conceptually useful?
1:09:15 - Insights from implementation level
1:24:44 - How detailed to model?
1:28:15 - Do action potentials cause consciousness?
1:40:33 - Mac Shine Q2]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 138 Matthew Larkum: The Dendrite Hypothesis]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Matthew Larkum runs his lab at Humboldt University of Berlin, where his group studies how dendrites contribute to computations within and across layers of the neocortex. Since the late 1990s, Matthew has continued to uncover key properties of the way pyramidal neurons stretch across layers of the cortex, their dendrites receiving inputs from those different layers - and thus different brain areas. For example, layer 5 pyramidal neurons have a set of basal dendrites near the cell body that receives feedforward-like input, and a set of apical dendrites all the way up in layer 1 that receives feedback-like input. Depending on which set of dendrites is receiving input, or neither or both, the neuron's output functions in different modes - silent, regular spiking, or burst spiking. Matthew realized the different sets of dendritic inputs could signal different operations, often pairing feedforward sensory-like signals and feedback context-like signals. His research has shown this kind of coincidence detection is important for cognitive functions like perception, memory, learning, and even wakefulness. We discuss many of his ideas and research findings, why dendrites have long been neglected in favor of neuron cell bodies, the possibility of learning about computations by studying implementation-level phenomena, and much more.</p>



<ul><li><a href="https://www.projekte.hu-berlin.de/en/larkum">Larkum Lab</a>.</li><li>Twitter: <a href="https://twitter.com/mattlark">@mattlark</a>.</li><li>Related papers<ul><li><a href="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(20)30175-3?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS1364661320301753%3Fshowall%3Dtrue">Cellular Mechanisms of Conscious Processing</a>.</li><li><a href="https://pubmed.ncbi.nlm.nih.gov/33335033/">Perirhinal input to neocortical layer 1 controls learning</a>. (bioRxiv link: <a href="https://www.biorxiv.org/content/10.1101/713883v1">https://www.biorxiv.org/content/10.1101/713883v1</a>)</li><li><a href="https://doi.org/10.1016/j.neuroscience.2022.03.008">Are dendrites conceptually useful?</a></li><li><a href="https://doi.org/10.1126/science.abk1859">Memories off the top of your head</a>.</li><li><a href="https://www.researchgate.net/publication/359861041_Do_action_potentials_cause_consciousness">Do Action Potentials Cause Consciousness?</a></li></ul></li><li><a href="https://braininspired.co/podcast/9/">Blake Richard's episode</a> discussing back-propagation in the brain (based on Matthew's experiments)</li></ul>



<p>0:00 - Intro
5:31 - Background: Dendrites
23:20 - Cortical neuron bodies vs. branches
25:47 - Theories of cortex
30:49 - Feedforward and feedback hierarchy
37:40 - Dendritic integration hypothesis
44:32 - DIT vs. other consciousness theories
51:30 - Mac Shine Q1
1:04:38 - Are dendrites conceptually useful?
1:09:15 - Insights from implementation level
1:24:44 - How detailed to model?
1:28:15 - Do action potentials cause consciousness?
1:40:33 - Mac Shine Q2</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/6278849f-83e8-415f-84bc-e21257898ae6-138-Matthew-Larkum-public.mp3" length="107538486"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.









Matthew Larkum runs his lab at Humboldt University of Berlin, where his group studies how dendrites contribute to computations within and across layers of the neocortex. Since the late 1990s, Matthew has continued to uncover key properties of the way pyramidal neurons stretch across layers of the cortex, their dendrites receiving inputs from those different layers - and thus different brain areas. For example, layer 5 pyramidal neurons have a set of basal dendrites near the cell body that receives feedforward-like input, and a set of apical dendrites all the way up in layer 1 that receives feedback-like input. Depending on which set of dendrites is receiving input, or neither or both, the neuron's output functions in different modes - silent, regular spiking, or burst spiking. Matthew realized the different sets of dendritic inputs could signal different operations, often pairing feedforward sensory-like signals and feedback context-like signals. His research has shown this kind of coincidence detection is important for cognitive functions like perception, memory, learning, and even wakefulness. We discuss many of his ideas and research findings, why dendrites have long been neglected in favor of neuron cell bodies, the possibility of learning about computations by studying implementation-level phenomena, and much more.



Larkum Lab.Twitter: @mattlark.Related papersCellular Mechanisms of Conscious Processing.Perirhinal input to neocortical layer 1 controls learning. (bioRxiv link: https://www.biorxiv.org/content/10.1101/713883v1)Are dendrites conceptually useful?Memories off the top of your head.Do Action Potentials Cause Consciousness?Blake Richard's episode discussing back-propagation in the brain (based on Matthew's experiments)



0:00 - Intro
5:31 - Background: Dendrites
23:20 - Cortical neuron bodies vs. branches
25:47 - Theories of cortex
30:49 - Feedforward and feedback hierarchy
37:40 - Dendritic integration hypothesis
44:32 - DIT vs. other consciousness theories
51:30 - Mac Shine Q1
1:04:38 - Are dendrites conceptually useful?
1:09:15 - Insights from implementation level
1:24:44 - How detailed to model?
1:28:15 - Do action potentials cause consciousness?
1:40:33 - Mac Shine Q2]]>
                </itunes:summary>
                                                                            <itunes:duration>01:51:42</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 137 Brian Butterworth: Can Fish Count?]]>
                </title>
                <pubDate>Fri, 27 May 2022 17:48:32 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-137-brian-butterworth-can-fish-count</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-137-brian-butterworth-can-fish-count</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Brian Butterworth is Emeritus Professor of Cognitive Neuropsychology at University College London. In his book, <a href="https://amzn.to/384caoB">Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds</a>, he describes the counting and numerical abilities across many different species, suggesting our ability to count is evolutionarily very old (since many diverse species can count). We discuss many of the examples in his book, the mathematical disability dyscalculia and its relation to dyslexia, how to test counting abilities in various species, how counting may happen in brains, the promise of creating artificial networks that can do math, and many more topics.</p>





<ul><li>Brian's website: <a href="https://www.mathematicalbrain.com/">The Mathematical Brain</a></li><li>Twitter: <a href="https://twitter.com/b_butterworth">@b_butterworth</a></li><li>The book:<ul><li><a href="https://amzn.to/384caoB">Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds</a></li></ul></li></ul>



<p>0:00 - Intro
3:19 - Why Counting?
5:31 - Dyscalculia
12:06 - Dyslexia
19:12 - Counting
26:37 - Origins of counting vs. language
34:48 - Counting vs. higher math
46:46 - Counting some things and not others
53:33 - How to test counting
1:03:30 - How does the brain count?
1:13:10 - Are numbers real?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.











Brian Butterworth is Emeritus Professor of Cognitive Neuropsychology at University College London. In his book, Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds, he describes the counting and numerical abilities across many different species, suggesting our ability to count is evolutionarily very old (since many diverse species can count). We discuss many of the examples in his book, the mathematical disability dyscalculia and its relation to dyslexia, how to test counting abilities in various species, how counting may happen in brains, the promise of creating artificial networks that can do math, and many more topics.





Brian's website: The Mathematical BrainTwitter: @b_butterworthThe book:Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds



0:00 - Intro
3:19 - Why Counting?
5:31 - Dyscalculia
12:06 - Dyslexia
19:12 - Counting
26:37 - Origins of counting vs. language
34:48 - Counting vs. higher math
46:46 - Counting some things and not others
53:33 - How to test counting
1:03:30 - How does the brain count?
1:13:10 - Are numbers real?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 137 Brian Butterworth: Can Fish Count?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>



<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>







<p>Brian Butterworth is Emeritus Professor of Cognitive Neuropsychology at University College London. In his book, <a href="https://amzn.to/384caoB">Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds</a>, he describes the counting and numerical abilities across many different species, suggesting our ability to count is evolutionarily very old (since many diverse species can count). We discuss many of the examples in his book, the mathematical disability dyscalculia and its relation to dyslexia, how to test counting abilities in various species, how counting may happen in brains, the promise of creating artificial networks that can do math, and many more topics.</p>





<ul><li>Brian's website: <a href="https://www.mathematicalbrain.com/">The Mathematical Brain</a></li><li>Twitter: <a href="https://twitter.com/b_butterworth">@b_butterworth</a></li><li>The book:<ul><li><a href="https://amzn.to/384caoB">Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds</a></li></ul></li></ul>



<p>0:00 - Intro
3:19 - Why Counting?
5:31 - Dyscalculia
12:06 - Dyslexia
19:12 - Counting
26:37 - Origins of counting vs. language
34:48 - Counting vs. higher math
46:46 - Counting some things and not others
53:33 - How to test counting
1:03:30 - How does the brain count?
1:13:10 - Are numbers real?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ff8b7d03-acc0-4dd4-81c1-b0e5924e49c5-137-Brian-Butterworth.mp3" length="75013076"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Check out my short video series about what's missing in AI and Neuroscience.







Support the show to get full episodes and join the Discord community.











Brian Butterworth is Emeritus Professor of Cognitive Neuropsychology at University College London. In his book, Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds, he describes the counting and numerical abilities across many different species, suggesting our ability to count is evolutionarily very old (since many diverse species can count). We discuss many of the examples in his book, the mathematical disability dyscalculia and its relation to dyslexia, how to test counting abilities in various species, how counting may happen in brains, the promise of creating artificial networks that can do math, and many more topics.





Brian's website: The Mathematical BrainTwitter: @b_butterworthThe book:Can Fish Count?: What Animals Reveal About Our Uniquely Mathematical Minds



0:00 - Intro
3:19 - Why Counting?
5:31 - Dyscalculia
12:06 - Dyslexia
19:12 - Counting
26:37 - Origins of counting vs. language
34:48 - Counting vs. higher math
46:46 - Counting some things and not others
53:33 - How to test counting
1:03:30 - How does the brain count?
1:13:10 - Are numbers real?]]>
                </itunes:summary>
                                                                            <itunes:duration>01:17:49</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 136 Michel Bitbol and Alex Gomez-Marin: Phenomenology]]>
                </title>
                <pubDate>Tue, 17 May 2022 14:54:42 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-136-michel-bitbol-and-alex-gomez-marin-phenomenomvf</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-136-michel-bitbol-and-alex-gomez-marin-phenomenomvf</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>







<p>Michel Bitbol is Director of Research at CNRS (Centre National de la Recherche Scientifique). Alex Gomez-Marin is a neuroscientist running his lab, The Behavior of Organisms Laboratory, at the Instituto de Neurociencias in Alicante. We discuss phenomenology as an alternative perspective on our scientific endeavors. Although we like to believe our science is objective and explains the reality of the world we inhabit, we can't escape the fact that all of our scientific knowledge comes through our perceptions and interpretations as conscious living beings. Michel has used phenomenology to resolve many of the paradoxes that quantum mechanics generates when it is understood as a description of reality, and more recently he has applied phenomenology to the philosophy of mind and consciousness. Alex is currently trying to apply the phenomenological approach to his research on brains and behavior. Much of our conversation revolves around how phenomenology and our "normal" scientific explorations can co-exist, including the study of minds, brains, and intelligence - our own and that of other organisms. We also discuss the "blind spot" of science, the history and practice of phenomenology, various kinds of explanation, the language we use to describe things, and more.</p>



<ul><li><a href="http://michel.bitbol.pagesperso-orange.fr/">Michel's website</a></li><li>Alex's Lab: <a href="https://behavior-of-organisms.org/">The Behavior of Organisms Laboratory</a>.</li><li>Twitter: <a href="https://twitter.com/behaviOrganisms">@behaviOrganisms</a> (Alex)</li><li>Related papers<ul><li><a href="https://rosa.uniroma1.it/rosa04/organisms/article/view/16437/15864">The Blind Spot of Neuroscience</a>  </li><li><a href="https://www.sciencedirect.com/science/article/pii/S0896627319307901">The Life of Behavior</a></li><li><a href="https://behavioroforganismsdotorg.files.wordpress.com/2019/11/gomezmarin2019bbs.pdf">A Clash of Umwelts</a> </li></ul></li><li>Related events:<ul><li><a href="https://paricenter.com/the-future-scientist-a-conversation-series/">The Future Scientist</a> (a conversation series)</li></ul></li></ul>



<p>0:00 - Intro
4:32 - The Blind Spot
15:53 - Phenomenology and interpretation
22:51 - Personal stories: appreciating phenomenology
37:42 - Quantum physics example
47:16 - Scientific explanation vs. phenomenological description
59:39 - How can phenomenology and science complement each other?
1:08:22 - Neurophenomenology
1:17:34 - Use of language
1:25:46 - Mutual constraints</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.











Michel Bitbol is Director of Research at CNRS (Centre National de la Recherche Scientifique). Alex Gomez-Marin is a neuroscientist running his lab, The Behavior of Organisms Laboratory, at the Instituto de Neurociencias in Alicante. We discuss phenomenology as an alternative perspective on our scientific endeavors. Although we like to believe our science is objective and explains the reality of the world we inhabit, we can't escape the fact that all of our scientific knowledge comes through our perceptions and interpretations as conscious living beings. Michel has used phenomenology to resolve many of the paradoxes that quantum mechanics generates when it is understood as a description of reality, and more recently he has applied phenomenology to the philosophy of mind and consciousness. Alex is currently trying to apply the phenomenological approach to his research on brains and behavior. Much of our conversation revolves around how phenomenology and our "normal" scientific explorations can co-exist, including the study of minds, brains, and intelligence - our own and that of other organisms. We also discuss the "blind spot" of science, the history and practice of phenomenology, various kinds of explanation, the language we use to describe things, and more.



Michel's websiteAlex's Lab: The Behavior of Organisms Laboratory.Twitter: @behaviOrganisms (Alex)Related papersThe Blind Spot of Neuroscience  The Life of BehaviorA Clash of Umwelts Related events:The Future Scientist (a conversation series)



0:00 - Intro
4:32 - The Blind Spot
15:53 - Phenomenology and interpretation
22:51 - Personal stories: appreciating phenomenology
37:42 - Quantum physics example
47:16 - Scientific explanation vs. phenomenological description
59:39 - How can phenomenology and science complement each other?
1:08:22 - Neurophenomenology
1:17:34 - Use of language
1:25:46 - Mutual constraints]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 136 Michel Bitbol and Alex Gomez-Marin: Phenomenology]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>







<p>Michel Bitbol is Director of Research at CNRS (Centre National de la Recherche Scientifique). Alex Gomez-Marin is a neuroscientist running his lab, The Behavior of Organisms Laboratory, at the Instituto de Neurociencias in Alicante. We discuss phenomenology as an alternative perspective on our scientific endeavors. Although we like to believe our science is objective and explains the reality of the world we inhabit, we can't escape the fact that all of our scientific knowledge comes through our perceptions and interpretations as conscious living beings. Michel has used phenomenology to resolve many of the paradoxes that quantum mechanics generates when it is understood as a description of reality, and more recently he has applied phenomenology to the philosophy of mind and consciousness. Alex is currently trying to apply the phenomenological approach to his research on brains and behavior. Much of our conversation revolves around how phenomenology and our "normal" scientific explorations can co-exist, including the study of minds, brains, and intelligence - our own and that of other organisms. We also discuss the "blind spot" of science, the history and practice of phenomenology, various kinds of explanation, the language we use to describe things, and more.</p>



<ul><li><a href="http://michel.bitbol.pagesperso-orange.fr/">Michel's website</a></li><li>Alex's Lab: <a href="https://behavior-of-organisms.org/">The Behavior of Organisms Laboratory</a>.</li><li>Twitter: <a href="https://twitter.com/behaviOrganisms">@behaviOrganisms</a> (Alex)</li><li>Related papers<ul><li><a href="https://rosa.uniroma1.it/rosa04/organisms/article/view/16437/15864">The Blind Spot of Neuroscience</a>  </li><li><a href="https://www.sciencedirect.com/science/article/pii/S0896627319307901">The Life of Behavior</a></li><li><a href="https://behavioroforganismsdotorg.files.wordpress.com/2019/11/gomezmarin2019bbs.pdf">A Clash of Umwelts</a> </li></ul></li><li>Related events:<ul><li><a href="https://paricenter.com/the-future-scientist-a-conversation-series/">The Future Scientist</a> (a conversation series)</li></ul></li></ul>



<p>0:00 - Intro
4:32 - The Blind Spot
15:53 - Phenomenology and interpretation
22:51 - Personal stories: appreciating phenomenology
37:42 - Quantum physics example
47:16 - Scientific explanation vs. phenomenological description
59:39 - How can phenomenology and science complement each other?
1:08:22 - Neurophenomenology
1:17:34 - Use of language
1:25:46 - Mutual constraints</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/a235ad12-fe80-4603-89ed-9b1197f07ee6-136-Bitbol-Gomez-Marin-public.mp3" length="90731605"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.











Michel Bitbol is Director of Research at CNRS (Centre National de la Recherche Scientifique). Alex Gomez-Marin is a neuroscientist running his lab, The Behavior of Organisms Laboratory, at the Instituto de Neurociencias in Alicante. We discuss phenomenology as an alternative perspective on our scientific endeavors. Although we like to believe our science is objective and explains the reality of the world we inhabit, we can't escape the fact that all of our scientific knowledge comes through our perceptions and interpretations as conscious living beings. Michel has used phenomenology to resolve many of the paradoxes that quantum mechanics generates when it is understood as a description of reality, and more recently he has applied phenomenology to the philosophy of mind and consciousness. Alex is currently trying to apply the phenomenological approach to his research on brains and behavior. Much of our conversation revolves around how phenomenology and our "normal" scientific explorations can co-exist, including the study of minds, brains, and intelligence - our own and that of other organisms. We also discuss the "blind spot" of science, the history and practice of phenomenology, various kinds of explanation, the language we use to describe things, and more.



Michel's websiteAlex's Lab: The Behavior of Organisms Laboratory.Twitter: @behaviOrganisms (Alex)Related papersThe Blind Spot of Neuroscience  The Life of BehaviorA Clash of Umwelts Related events:The Future Scientist (a conversation series)



0:00 - Intro
4:32 - The Blind Spot
15:53 - Phenomenology and interpretation
22:51 - Personal stories: appreciating phenomenology
37:42 - Quantum physics example
47:16 - Scientific explanation vs. phenomenological description
59:39 - How can phenomenology and science complement each other?
1:08:22 - Neurophenomenology
1:17:34 - Use of language
1:25:46 - Mutual constraints]]>
                </itunes:summary>
                                                                            <itunes:duration>01:34:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 135 Elena Galea: The Stars of the Brain]]>
                </title>
                <pubDate>Fri, 06 May 2022 22:12:25 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-135-elena-galea-the-stars-of-the-brain</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-135-elena-galea-the-stars-of-the-brain</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>





<p>Brains are often conceived as consisting of neurons and "everything else." As Elena discusses, the "everything else," including glial cells and in particular astrocytes, have largely been ignored in neuroscience. That's partly because the fast action potentials of neurons have been assumed to underlie computations in the brain, and because technology only recently afforded closer scrutiny of astrocyte activity. Now that we can record calcium signaling in astrocytes, it's possible to relate how astrocyte signaling with each other and with neurons may complement the cognitive roles once thought the sole domain of neurons. Although the computational role of astrocytes remains unclear, it is clear that astrocytes interact with neurons and neural circuits in dynamic and interesting ways. We talk about the historical story of astrocytes, the emerging modern story, and Elena shares her views on the path forward to understand astrocyte function in cognition, disease, homeostasis, and - Elena's favorite current hypothesis - their integrative role in negative feedback control.</p>



<ul><li><a href="https://www.icrea.cat/Web/ScientificStaff/elena-galea-248">Elena's website</a>.</li><li>Twitter: <a href="https://twitter.com/elenagalea1">@elenagalea1</a></li><li>Related papers<ul><li><a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6832773/">A roadmap to integrate astrocytes into Systems Neuroscience</a>.</li><li>Elena recommended this paper: <a href="https://pubmed.ncbi.nlm.nih.gov/34139160/">Biological feedback control—Respect the loops</a>.</li></ul></li></ul>





<p>0:00 - Intro
5:23 - The changing story of astrocytes
14:58 - Astrocyte research lags neuroscience
19:45 - Types of astrocytes
23:06 - Astrocytes vs neurons
26:08 - Computational roles of astrocytes
35:45 - Feedback control
43:37 - Energy efficiency
46:25 - Current technology
52:58 - Computational astroscience
1:10:57 - Do names for things matter</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.









Brains are often conceived as consisting of neurons and "everything else." As Elena discusses, the "everything else," including glial cells and in particular astrocytes, have largely been ignored in neuroscience. That's partly because the fast action potentials of neurons have been assumed to underlie computations in the brain, and because technology only recently afforded closer scrutiny of astrocyte activity. Now that we can record calcium signaling in astrocytes, it's possible to relate how astrocyte signaling with each other and with neurons may complement the cognitive roles once thought the sole domain of neurons. Although the computational role of astrocytes remains unclear, it is clear that astrocytes interact with neurons and neural circuits in dynamic and interesting ways. We talk about the historical story of astrocytes, the emerging modern story, and Elena shares her views on the path forward to understand astrocyte function in cognition, disease, homeostasis, and - Elena's favorite current hypothesis - their integrative role in negative feedback control.



Elena's website.Twitter: @elenagalea1Related papersA roadmap to integrate astrocytes into Systems Neuroscience.Elena recommended this paper: Biological feedback control—Respect the loops.





0:00 - Intro
5:23 - The changing story of astrocytes
14:58 - Astrocyte research lags neuroscience
19:45 - Types of astrocytes
23:06 - Astrocytes vs neurons
26:08 - Computational roles of astrocytes
35:45 - Feedback control
43:37 - Energy efficiency
46:25 - Current technology
52:58 - Computational astroscience
1:10:57 - Do names for things matter]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 135 Elena Galea: The Stars of the Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center"><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>





<p>Brains are often conceived as consisting of neurons and "everything else." As Elena discusses, the "everything else," including glial cells and in particular astrocytes, have largely been ignored in neuroscience. That's partly because the fast action potentials of neurons have been assumed to underlie computations in the brain, and because technology only recently afforded closer scrutiny of astrocyte activity. Now that we can record calcium signaling in astrocytes, it's possible to relate how astrocyte signaling with each other and with neurons may complement the cognitive roles once thought the sole domain of neurons. Although the computational role of astrocytes remains unclear, it is clear that astrocytes interact with neurons and neural circuits in dynamic and interesting ways. We talk about the historical story of astrocytes, the emerging modern story, and Elena shares her views on the path forward to understand astrocyte function in cognition, disease, homeostasis, and - Elena's favorite current hypothesis - their integrative role in negative feedback control.</p>



<ul><li><a href="https://www.icrea.cat/Web/ScientificStaff/elena-galea-248">Elena's website</a>.</li><li>Twitter: <a href="https://twitter.com/elenagalea1">@elenagalea1</a></li><li>Related papers<ul><li><a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6832773/">A roadmap to integrate astrocytes into Systems Neuroscience</a>.</li><li>Elena recommended this paper: <a href="https://pubmed.ncbi.nlm.nih.gov/34139160/">Biological feedback control—Respect the loops</a>.</li></ul></li></ul>





<p>0:00 - Intro
5:23 - The changing story of astrocytes
14:58 - Astrocyte research lags neuroscience
19:45 - Types of astrocytes
23:06 - Astrocytes vs neurons
26:08 - Computational roles of astrocytes
35:45 - Feedback control
43:37 - Energy efficiency
46:25 - Current technology
52:58 - Computational astroscience
1:10:57 - Do names for things matter</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/f3b26544-7339-4f21-ab54-ae9bcc3fbab0-135-Elena-Galea-public.mp3" length="74617991"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.









Brains are often conceived as consisting of neurons and "everything else." As Elena discusses, the "everything else," including glial cells and in particular astrocytes, have largely been ignored in neuroscience. That's partly because the fast action potentials of neurons have been assumed to underlie computations in the brain, and because technology only recently afforded closer scrutiny of astrocyte activity. Now that we can record calcium signaling in astrocytes, it's possible to relate how astrocyte signaling with each other and with neurons may complement the cognitive roles once thought the sole domain of neurons. Although the computational role of astrocytes remains unclear, it is clear that astrocytes interact with neurons and neural circuits in dynamic and interesting ways. We talk about the historical story of astrocytes, the emerging modern story, and Elena shares her views on the path forward to understand astrocyte function in cognition, disease, homeostasis, and - Elena's favorite current hypothesis - their integrative role in negative feedback control.



Elena's website.Twitter: @elenagalea1Related papersA roadmap to integrate astrocytes into Systems Neuroscience.Elena recommended this paper: Biological feedback control—Respect the loops.





0:00 - Intro
5:23 - The changing story of astrocytes
14:58 - Astrocyte research lags neuroscience
19:45 - Types of astrocytes
23:06 - Astrocytes vs neurons
26:08 - Computational roles of astrocytes
35:45 - Feedback control
43:37 - Energy efficiency
46:25 - Current technology
52:58 - Computational astroscience
1:10:57 - Do names for things matter]]>
                </itunes:summary>
                                                                            <itunes:duration>01:17:25</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 134 Mandyam Srinivasan: Bee Flight and Cognition]]>
                </title>
                <pubDate>Wed, 27 Apr 2022 16:11:44 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-134-mandyam-srinivasan-bee-flight-and-cognition</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-134-mandyam-srinivasan-bee-flight-and-cognition</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>





<p>Srini is Emeritus Professor at Queensland Brain Institute in Australia. In this episode, he shares his wide range of behavioral experiments elucidating the principles of flight and navigation in insects. We discuss how bees use optic flow signals to determine their speed, distance, proximity to objects, and to gracefully land. These abilities are largely governed via control systems, balancing incoming perceptual signals with internal reference signals. We also talk about a few of the aerial robotics projects his research has inspired, many of the other cognitive skills bees can learn, the possibility of their feeling pain, and the nature of their possible subjective conscious experience.</p>



<ul><li><a href="https://qbi.uq.edu.au/profile/613/srini-srinivasan">Srini's Website</a>.</li><li>Related papers<ul><li><a href="https://www.sciencedirect.com/science/article/abs/pii/S0006291X20317940">Vision, perception, navigation and 'cognition' in honeybees and applications to aerial robotics</a>.</li></ul></li></ul>





<p>0:00 - Intro
3:34 - Background
8:20 - Bee experiments
14:30 - Bee flight and navigation
28:05 - Landing
33:06 - Umwelt and perception
37:26 - Bee-inspired aerial robotics
49:10 - Motion camouflage
51:52 - Cognition in bees
1:03:10 - Small vs. big brains
1:06:42 - Pain in bees
1:12:50 - Subjective experience
1:15:25 - Deep learning
1:23:00 - Path forward</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.









Srini is Emeritus Professor at Queensland Brain Institute in Australia. In this episode, he shares his wide range of behavioral experiments elucidating the principles of flight and navigation in insects. We discuss how bees use optic flow signals to determine their speed, distance, proximity to objects, and to gracefully land. These abilities are largely governed via control systems, balancing incoming perceptual signals with internal reference signals. We also talk about a few of the aerial robotics projects his research has inspired, many of the other cognitive skills bees can learn, the possibility of their feeling pain, and the nature of their possible subjective conscious experience.



Srini's Website.Related papersVision, perception, navigation and 'cognition' in honeybees and applications to aerial robotics.





0:00 - Intro
3:34 - Background
8:20 - Bee experiments
14:30 - Bee flight and navigation
28:05 - Landing
33:06 - Umwelt and perception
37:26 - Bee-inspired aerial robotics
49:10 - Motion camouflage
51:52 - Cognition in bees
1:03:10 - Small vs. big brains
1:06:42 - Pain in bees
1:12:50 - Subjective experience
1:15:25 - Deep learning
1:23:00 - Path forward]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 134 Mandyam Srinivasan: Bee Flight and Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>





<p>Srini is Emeritus Professor at Queensland Brain Institute in Australia. In this episode, he shares his wide range of behavioral experiments elucidating the principles of flight and navigation in insects. We discuss how bees use optic flow signals to determine their speed, distance, proximity to objects, and to gracefully land. These abilities are largely governed via control systems, balancing incoming perceptual signals with internal reference signals. We also talk about a few of the aerial robotics projects his research has inspired, many of the other cognitive skills bees can learn, the possibility of their feeling pain, and the nature of their possible subjective conscious experience.</p>



<ul><li><a href="https://qbi.uq.edu.au/profile/613/srini-srinivasan">Srini's Website</a>.</li><li>Related papers<ul><li><a href="https://www.sciencedirect.com/science/article/abs/pii/S0006291X20317940">Vision, perception, navigation and 'cognition' in honeybees and applications to aerial robotics</a>.</li></ul></li></ul>





<p>0:00 - Intro
3:34 - Background
8:20 - Bee experiments
14:30 - Bee flight and navigation
28:05 - Landing
33:06 - Umwelt and perception
37:26 - Bee-inspired aerial robotics
49:10 - Motion camouflage
51:52 - Cognition in bees
1:03:10 - Small vs. big brains
1:06:42 - Pain in bees
1:12:50 - Subjective experience
1:15:25 - Deep learning
1:23:00 - Path forward</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/bd14131f-7725-446c-9a9c-b6ef3ca22a65-134-Srini-public.mp3" length="83140699"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.









Srini is Emeritus Professor at Queensland Brain Institute in Australia. In this episode, he shares his wide range of behavioral experiments elucidating the principles of flight and navigation in insects. We discuss how bees use optic flow signals to determine their speed, distance, proximity to objects, and to gracefully land. These abilities are largely governed via control systems, balancing incoming perceptual signals with internal reference signals. We also talk about a few of the aerial robotics projects his research has inspired, many of the other cognitive skills bees can learn, the possibility of their feeling pain, and the nature of their possible subjective conscious experience.



Srini's Website.Related papersVision, perception, navigation and 'cognition' in honeybees and applications to aerial robotics.





0:00 - Intro
3:34 - Background
8:20 - Bee experiments
14:30 - Bee flight and navigation
28:05 - Landing
33:06 - Umwelt and perception
37:26 - Bee-inspired aerial robotics
49:10 - Motion camouflage
51:52 - Cognition in bees
1:03:10 - Small vs. big brains
1:06:42 - Pain in bees
1:12:50 - Subjective experience
1:15:25 - Deep learning
1:23:00 - Path forward]]>
                </itunes:summary>
                                                                            <itunes:duration>01:26:17</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 133 Ken Paller: Lucid Dreaming, Memory, and Sleep]]>
                </title>
                <pubDate>Fri, 15 Apr 2022 17:06:09 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-133-ken-paller-lucid-dreaming-memory-and-sleep</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-133-ken-paller-lucid-dreaming-memory-and-sleep</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>





<p>Ken discusses the recent work in his lab that allows communication with subjects while they experience lucid dreams. This new paradigm opens many avenues to study the neuroscience and psychology of consciousness, sleep, dreams, memory, and learning, and to improve and optimize sleep for cognition. Ken and his team are developing a Lucid Dreaming App which is <a href="https://pallerlab.psych.northwestern.edu/dream.html">freely available via his lab</a>. We also discuss much of his work on memory and learning in general and specifically related to sleep, like reactivating specific memories during sleep to improve learning.</p>



<ul><li><a href="https://pallerlab.psych.northwestern.edu/">Ken's Cognitive Neuroscience Laboratory</a>.</li><li>Twitter: <a href="https://twitter.com/kap101">@kap101</a>.</li><li><a href="https://pallerlab.psych.northwestern.edu/dream.html">The Lucid Dreaming App</a>.</li><li>Related papers<ul><li><a href="https://par.nsf.gov/servlets/purl/10275880">Memory and Sleep: How Sleep Cognition Can Change the Waking Mind for the Better</a>.</li><li><a href="https://www.sciencedirect.com/science/article/abs/pii/S1074742721000642">Does memory reactivation during sleep support generalization at the cost of memory specifics?</a></li><li><a href="https://www.cell.com/current-biology/pdf/S0960-9822(21)00059-2.pdf">Real-time dialogue between experimenters and dreamers during REM sleep.</a></li></ul></li></ul>





<p>0:00 - Intro
2:48 - Background and types of memory
14:44 - Consciousness and memory
23:32 - Phases of sleep and wakefulness
28:19 - Sleep, memory, and learning
33:50 - Targeted memory reactivation
48:34 - Problem solving during sleep
51:50 - 2-way communication with lucid dreamers
1:01:43 - Confounds to the paradigm
1:04:50 - Limitations and future studies
1:09:35 - Lucid dreaming app
1:13:47 - How sleep can inform AI
1:20:18 - Advice for students</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.









Ken discusses the recent work in his lab that allows communication with subjects while they experience lucid dreams. This new paradigm opens many avenues to study the neuroscience and psychology of consciousness, sleep, dreams, memory, and learning, and to improve and optimize sleep for cognition. Ken and his team are developing a Lucid Dreaming App which is freely available via his lab. We also discuss much of his work on memory and learning in general and specifically related to sleep, like reactivating specific memories during sleep to improve learning.



Ken's Cognitive Neuroscience Laboratory.Twitter: @kap101.The Lucid Dreaming App.Related papersMemory and Sleep: How Sleep Cognition Can Change the Waking Mind for the Better.Does memory reactivation during sleep support generalization at the cost of memory specifics?Real-time dialogue between experimenters and dreamers during REM sleep.





0:00 - Intro
2:48 - Background and types of memory
14:44 - Consciousness and memory
23:32 - Phases of sleep and wakefulness
28:19 - Sleep, memory, and learning
33:50 - Targeted memory reactivation
48:34 - Problem solving during sleep
51:50 - 2-way communication with lucid dreamers
1:01:43 - Confounds to the paradigm
1:04:50 - Limitations and future studies
1:09:35 - Lucid dreaming app
1:13:47 - How sleep can inform AI
1:20:18 - Advice for students]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 133 Ken Paller: Lucid Dreaming, Memory, and Sleep]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center"><a href="https://www.patreon.com/braininspired">Support the show</a> to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>



<p><a href="https://braininspired.co/bi-workshop-open-questions-opt-in/">Check out my short video series about what's missing in AI and Neuroscience.</a></p>



<a href="https://braininspired.co/bi-workshop-open-questions-opt-in/"></a>





<p>Ken discusses the recent work in his lab that allows communication with subjects while they experience lucid dreams. This new paradigm opens many avenues to study the neuroscience and psychology of consciousness, sleep, dreams, memory, and learning, and to improve and optimize sleep for cognition. Ken and his team are developing a Lucid Dreaming App which is <a href="https://pallerlab.psych.northwestern.edu/dream.html">freely available via his lab</a>. We also discuss much of his work on memory and learning in general and specifically related to sleep, like reactivating specific memories during sleep to improve learning.</p>



<ul><li><a href="https://pallerlab.psych.northwestern.edu/">Ken's Cognitive Neuroscience Laboratory</a>.</li><li>Twitter: <a href="https://twitter.com/kap101">@kap101</a>.</li><li><a href="https://pallerlab.psych.northwestern.edu/dream.html">The Lucid Dreaming App</a>.</li><li>Related papers<ul><li><a href="https://par.nsf.gov/servlets/purl/10275880">Memory and Sleep: How Sleep Cognition Can Change the Waking Mind for the Better</a>.</li><li><a href="https://www.sciencedirect.com/science/article/abs/pii/S1074742721000642">Does memory reactivation during sleep support generalization at the cost of memory specifics?</a></li><li><a href="https://www.cell.com/current-biology/pdf/S0960-9822(21)00059-2.pdf">Real-time dialogue between experimenters and dreamers during REM sleep.</a></li></ul></li></ul>





<p>0:00 - Intro
2:48 - Background and types of memory
14:44 - Consciousness and memory
23:32 - Phases of sleep and wakefulness
28:19 - Sleep, memory, and learning
33:50 - Targeted memory reactivation
48:34 - Problem solving during sleep
51:50 - 2-way communication with lucid dreamers
1:01:43 - Confounds to the paradigm
1:04:50 - Limitations and future studies
1:09:35 - Lucid dreaming app
1:13:47 - How sleep can inform AI
1:20:18 - Advice for students</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/22660310-1039-4f41-8459-0e3f706b968c-133-Ken-Paller-public.mp3" length="85965436"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.







Check out my short video series about what's missing in AI and Neuroscience.









Ken discusses the recent work in his lab that allows communication with subjects while they experience lucid dreams. This new paradigm opens many avenues to study the neuroscience and psychology of consciousness, sleep, dreams, memory, and learning, and to improve and optimize sleep for cognition. Ken and his team are developing a Lucid Dreaming App which is freely available via his lab. We also discuss much of his work on memory and learning in general and specifically related to sleep, like reactivating specific memories during sleep to improve learning.



Ken's Cognitive Neuroscience Laboratory.Twitter: @kap101.The Lucid Dreaming App.Related papersMemory and Sleep: How Sleep Cognition Can Change the Waking Mind for the Better.Does memory reactivation during sleep support generalization at the cost of memory specifics?Real-time dialogue between experimenters and dreamers during REM sleep.





0:00 - Intro
2:48 - Background and types of memory
14:44 - Consciousness and memory
23:32 - Phases of sleep and wakefulness
28:19 - Sleep, memory, and learning
33:50 - Targeted memory reactivation
48:34 - Problem solving during sleep
51:50 - 2-way communication with lucid dreamers
1:01:43 - Confounds to the paradigm
1:04:50 - Limitations and future studies
1:09:35 - Lucid dreaming app
1:13:47 - How sleep can inform AI
1:20:18 - Advice for students]]>
                </itunes:summary>
                                                                            <itunes:duration>01:29:14</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 132 Ila Fiete: A Grid Scaffold for Memory]]>
                </title>
                <pubDate>Sun, 03 Apr 2022 15:31:18 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-132-ila-fiete-a-grid-scaffold-for-memory</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-132-ila-fiete-a-grid-scaffold-for-memory</link>
                                <description>
                                            <![CDATA[Announcement:



<p>I'm releasing my Neuro-AI course April 10-13, after which it will be closed for some time. <a href="https://braininspired.co/bi-workshop-2/">Learn more here.</a></p>





<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Ila discusses her theoretical neuroscience work suggesting how our memories are formed within the cognitive maps we use to navigate the world and navigate our thoughts. The main idea is that grid cell networks in the entorhinal cortex internally generate a structured scaffold, which gets sent to the hippocampus. Neurons in the hippocampus, like the well-known place cells, receive that scaffolding and also receive external signals from the neocortex — signals about what's happening in the world and in our thoughts. Thus, the place cells act to "pin" what's happening in our neocortex to the scaffold, forming a memory. We also discuss her background as a physicist and her approach as a "neurophysicist", and a review she's publishing all about the many brain areas and cognitive functions being explained as attractor landscapes within a dynamical systems framework.</p>



<ul><li><a href="https://fietelab.mit.edu/">The Fiete Lab</a>.</li><li>Related papers<ul><li><a href="https://www.biorxiv.org/content/10.1101/2021.11.20.469406v1.article-info">A structured scaffold underlies activity in the hippocampus.</a></li><li><a href="https://arxiv.org/abs/2112.03978">Attractor and integrator networks in the brain.</a></li></ul></li></ul>





<p>0:00 - Intro
3:36 - "Neurophysicist"
9:30 - Bottom-up vs. top-down
15:57 - Tool scavenging
18:21 - Cognitive maps and hippocampus
22:40 - Hopfield networks
27:56 - Internal scaffold
38:42 - Place cells
43:44 - Grid cells
54:22 - Grid cells encoding place cells
59:39 - Scaffold model: stacked hopfield networks
1:05:39 - Attractor landscapes
1:09:22 - Landscapes across scales
1:12:27 - Dimensionality of landscapes</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Announcement:



I'm releasing my Neuro-AI course April 10-13, after which it will be closed for some time. Learn more here.





Support the show to get full episodes and join the Discord community.









Ila discusses her theoretical neuroscience work suggesting how our memories are formed within the cognitive maps we use to navigate the world and navigate our thoughts. The main idea is that grid cell networks in the entorhinal cortex internally generate a structured scaffold, which gets sent to the hippocampus. Neurons in the hippocampus, like the well-known place cells, receive that scaffolding and also receive external signals from the neocortex — signals about what's happening in the world and in our thoughts. Thus, the place cells act to "pin" what's happening in our neocortex to the scaffold, forming a memory. We also discuss her background as a physicist and her approach as a "neurophysicist", and a review she's publishing all about the many brain areas and cognitive functions being explained as attractor landscapes within a dynamical systems framework.



The Fiete Lab.Related papersA structured scaffold underlies activity in the hippocampus.Attractor and integrator networks in the brain.





0:00 - Intro
3:36 - "Neurophysicist"
9:30 - Bottom-up vs. top-down
15:57 - Tool scavenging
18:21 - Cognitive maps and hippocampus
22:40 - Hopfield networks
27:56 - Internal scaffold
38:42 - Place cells
43:44 - Grid cells
54:22 - Grid cells encoding place cells
59:39 - Scaffold model: stacked hopfield networks
1:05:39 - Attractor landscapes
1:09:22 - Landscapes across scales
1:12:27 - Dimensionality of landscapes]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 132 Ila Fiete: A Grid Scaffold for Memory]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[Announcement:



<p>I'm releasing my Neuro-AI course April 10-13, after which it will be closed for some time. <a href="https://braininspired.co/bi-workshop-2/">Learn more here.</a></p>





<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Ila discusses her theoretical neuroscience work suggesting how our memories are formed within the cognitive maps we use to navigate the world and navigate our thoughts. The main idea is that grid cell networks in the entorhinal cortex internally generate a structured scaffold, which gets sent to the hippocampus. Neurons in the hippocampus, like the well-known place cells, receive that scaffolding and also receive external signals from the neocortex — signals about what's happening in the world and in our thoughts. Thus, the place cells act to "pin" what's happening in our neocortex to the scaffold, forming a memory. We also discuss her background as a physicist and her approach as a "neurophysicist", and a review she's publishing all about the many brain areas and cognitive functions being explained as attractor landscapes within a dynamical systems framework.</p>



<ul><li><a href="https://fietelab.mit.edu/">The Fiete Lab</a>.</li><li>Related papers<ul><li><a href="https://www.biorxiv.org/content/10.1101/2021.11.20.469406v1.article-info">A structured scaffold underlies activity in the hippocampus.</a></li><li><a href="https://arxiv.org/abs/2112.03978">Attractor and integrator networks in the brain.</a></li></ul></li></ul>





<p>0:00 - Intro
3:36 - "Neurophysicist"
9:30 - Bottom-up vs. top-down
15:57 - Tool scavenging
18:21 - Cognitive maps and hippocampus
22:40 - Hopfield networks
27:56 - Internal scaffold
38:42 - Place cells
43:44 - Grid cells
54:22 - Grid cells encoding place cells
59:39 - Scaffold model: stacked hopfield networks
1:05:39 - Attractor landscapes
1:09:22 - Landscapes across scales
1:12:27 - Dimensionality of landscapes</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/3c1f3acd-99ee-4686-bca5-69f8c0193af1-132-Ila-Fiete-public.mp3" length="74535459"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Announcement:



I'm releasing my Neuro-AI course April 10-13, after which it will be closed for some time. Learn more here.





Support the show to get full episodes and join the Discord community.









Ila discusses her theoretical neuroscience work suggesting how our memories are formed within the cognitive maps we use to navigate the world and navigate our thoughts. The main idea is that grid cell networks in the entorhinal cortex internally generate a structured scaffold, which gets sent to the hippocampus. Neurons in the hippocampus, like the well-known place cells, receive that scaffolding and also receive external signals from the neocortex — signals about what's happening in the world and in our thoughts. Thus, the place cells act to "pin" what's happening in our neocortex to the scaffold, forming a memory. We also discuss her background as a physicist and her approach as a "neurophysicist", and a review she's publishing all about the many brain areas and cognitive functions being explained as attractor landscapes within a dynamical systems framework.



The Fiete Lab.Related papersA structured scaffold underlies activity in the hippocampus.Attractor and integrator networks in the brain.





0:00 - Intro
3:36 - "Neurophysicist"
9:30 - Bottom-up vs. top-down
15:57 - Tool scavenging
18:21 - Cognitive maps and hippocampus
22:40 - Hopfield networks
27:56 - Internal scaffold
38:42 - Place cells
43:44 - Grid cells
54:22 - Grid cells encoding place cells
59:39 - Scaffold model: stacked hopfield networks
1:05:39 - Attractor landscapes
1:09:22 - Landscapes across scales
1:12:27 - Dimensionality of landscapes]]>
                </itunes:summary>
                                                                            <itunes:duration>01:17:20</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 131 Sri Ramaswamy and Jie Mei: Neuromodulation-aware DNNs]]>
                </title>
                <pubDate>Sat, 26 Mar 2022 05:11:39 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-131-sri-ramaswamy-and-jie-mei-neuromodulation-awr7o</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-131-sri-ramaswamy-and-jie-mei-neuromodulation-awr7o</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Sri and Mei join me to discuss how including principles of neuromodulation in deep learning networks may improve network performance. It's an ever-present question how much detail to include in models, and we are in the early stages of learning how neuromodulators and their interactions shape biological brain function. But as we continue to learn more, Sri and Mei are interested in building "neuromodulation-aware DNNs".</p>



<ul><li><a href="https://blogs.ncl.ac.uk/srikanthramaswamy/">Neural Circuits Laboratory</a>.</li><li>Twitter: Sri: <a href="https://twitter.com/srikipedia">@srikipedia</a>; Jie: <a href="https://twitter.com/neuro_Mei">@neuro_Mei</a>.</li><li>Related papers<ul><li><a href="https://www.cell.com/trends/neurosciences/pdf/S0166-2236(21)00256-3.pdf">Informing deep neural networks by multiscale principles of neuromodulatory systems</a>.</li></ul></li></ul>





<p>0:00 - Intro
3:10 - Background
9:19 - Bottom-up vs. top-down
14:42 - Levels of abstraction
22:46 - Biological neuromodulation
33:18 - Inventing neuromodulators
41:10 - How far along are we?
53:31 - Multiple realizability
1:09:40 - Modeling dendrites
1:15:24 - Across-species neuromodulation</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Sri and Mei join me to discuss how including principles of neuromodulation in deep learning networks may improve network performance. It's an ever-present question how much detail to include in models, and we are in the early stages of learning how neuromodulators and their interactions shape biological brain function. But as we continue to learn more, Sri and Mei are interested in building "neuromodulation-aware DNNs".



Neural Circuits Laboratory.Twitter: Sri: @srikipedia; Jie: @neuro_Mei.Related papersInforming deep neural networks by multiscale principles of neuromodulatory systems.





0:00 - Intro
3:10 - Background
9:19 - Bottom-up vs. top-down
14:42 - Levels of abstraction
22:46 - Biological neuromodulation
33:18 - Inventing neuromodulators
41:10 - How far along are we?
53:31 - Multiple realizability
1:09:40 - Modeling dendrites
1:15:24 - Across-species neuromodulation]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 131 Sri Ramaswamy and Jie Mei: Neuromodulation-aware DNNs]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Sri and Mei join me to discuss how including principles of neuromodulation in deep learning networks may improve network performance. It's an ever-present question how much detail to include in models, and we are in the early stages of learning how neuromodulators and their interactions shape biological brain function. But as we continue to learn more, Sri and Mei are interested in building "neuromodulation-aware DNNs".</p>



<ul><li><a href="https://blogs.ncl.ac.uk/srikanthramaswamy/">Neural Circuits Laboratory</a>.</li><li>Twitter: Sri: <a href="https://twitter.com/srikipedia">@srikipedia</a>; Jie: <a href="https://twitter.com/neuro_Mei">@neuro_Mei</a>.</li><li>Related papers<ul><li><a href="https://www.cell.com/trends/neurosciences/pdf/S0166-2236(21)00256-3.pdf">Informing deep neural networks by multiscale principles of neuromodulatory systems</a>.</li></ul></li></ul>





<p>0:00 - Intro
3:10 - Background
9:19 - Bottom-up vs. top-down
14:42 - Levels of abstraction
22:46 - Biological neuromodulation
33:18 - Inventing neuromodulators
41:10 - How far along are we?
53:31 - Multiple realizability
1:09:40 - Modeling dendrites
1:15:24 - Across-species neuromodulation</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/e33ef531-8033-49b2-a096-2bed34e23dcb-131-Sri-Mei-public.mp3" length="83690092"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Sri and Mei join me to discuss how including principles of neuromodulation in deep learning networks may improve network performance. It's an ever-present question how much detail to include in models, and we are in the early stages of learning how neuromodulators and their interactions shape biological brain function. But as we continue to learn more, Sri and Mei are interested in building "neuromodulation-aware DNNs".



Neural Circuits Laboratory.Twitter: Sri: @srikipedia; Jie: @neuro_Mei.Related papersInforming deep neural networks by multiscale principles of neuromodulatory systems.





0:00 - Intro
3:10 - Background
9:19 - Bottom-up vs. top-down
14:42 - Levels of abstraction
22:46 - Biological neuromodulation
33:18 - Inventing neuromodulators
41:10 - How far along are we?
53:31 - Multiple realizability
1:09:40 - Modeling dendrites
1:15:24 - Across-species neuromodulation]]>
                </itunes:summary>
                                                                            <itunes:duration>01:26:52</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 130 Eve Marder: Modulation of Networks]]>
                </title>
                <pubDate>Sun, 13 Mar 2022 14:54:05 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-130-eve-marder-modulation-of-networks</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-130-eve-marder-modulation-of-networks</link>
                                <description>
                                            <![CDATA[<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>





<p>Eve discusses many of the lessons she has learned studying a small nervous system, the crustacean stomatogastric nervous system (STG). The STG has only about 30 neurons and its connections and neurophysiology are well-understood. Yet Eve's work has shown it functions under a remarkable diversity of conditions, and does so in a remarkable variety of ways. We discuss her work on the STG specifically, and what her work implies about trying to study much larger nervous systems, like our human brains.</p>



<ul><li><a href="http://blogs.brandeis.edu/marderlab/">The Marder Lab</a>.</li><li>Twitter: <a href="https://twitter.com/MarderLab">@MarderLab</a>.</li><li>Related to our conversation:<ul><li><a href="https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1002147">Understanding Brains: Details, Intuition, and Big Data</a>.</li><li><a href="http://sites.iiserpune.ac.in/~raghav/pdfs/animalbehavior/ReadingList/getting_1989.pdf">Emerging principles governing the operation of neural networks</a> (Eve mentions this regarding "building blocks" of neural networks).</li></ul></li></ul>





<p>0:00 - Intro
3:58 - Background
8:00 - Levels of ambiguity
9:47 - Stomatogastric nervous system
17:13 - Structure vs. function
26:08 - Role of theory
34:56 - Technology vs. understanding
38:25 - Higher cognitive function
44:35 - Adaptability, resilience, evolution
50:23 - Climate change
56:11 - Deep learning
57:12 - Dynamical systems</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[



Support the show to get full episodes and join the Discord community.





Eve discusses many of the lessons she has learned studying a small nervous system, the crustacean stomatogastric nervous system (STG). The STG has only about 30 neurons and its connections and neurophysiology are well-understood. Yet Eve's work has shown it functions under a remarkable diversity of conditions, and does so in a remarkable variety of ways. We discuss her work on the STG specifically, and what her work implies about trying to study much larger nervous systems, like our human brains.



The Marder Lab.Twitter: @MarderLab.Related to our conversation:Understanding Brains: Details, Intuition, and Big Data.Emerging principles governing the operation of neural networks (Eve mentions this regarding "building blocks" of neural networks).





0:00 - Intro
3:58 - Background
8:00 - Levels of ambiguity
9:47 - Stomatogastric nervous system
17:13 - Structure vs. function
26:08 - Role of theory
34:56 - Technology vs. understanding
38:25 - Higher cognitive function
44:35 - Adaptability, resilience, evolution
50:23 - Climate change
56:11 - Deep learning
57:12 - Dynamical systems]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 130 Eve Marder: Modulation of Networks]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<a href="https://www.patreon.com/braininspired"></a>



<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>





<p>Eve discusses many of the lessons she has learned studying a small nervous system, the crustacean stomatogastric nervous system (STG). The STG has only about 30 neurons and its connections and neurophysiology are well-understood. Yet Eve's work has shown it functions under a remarkable diversity of conditions, and does so in a remarkable variety of ways. We discuss her work on the STG specifically, and what her work implies about trying to study much larger nervous systems, like our human brains.</p>



<ul><li><a href="http://blogs.brandeis.edu/marderlab/">The Marder Lab</a>.</li><li>Twitter: <a href="https://twitter.com/MarderLab">@MarderLab</a>.</li><li>Related to our conversation:<ul><li><a href="https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1002147">Understanding Brains: Details, Intuition, and Big Data</a>.</li><li><a href="http://sites.iiserpune.ac.in/~raghav/pdfs/animalbehavior/ReadingList/getting_1989.pdf">Emerging principles governing the operation of neural networks</a> (Eve mentions this regarding "building blocks" of neural networks).</li></ul></li></ul>





<p>0:00 - Intro
3:58 - Background
8:00 - Levels of ambiguity
9:47 - Stomatogastric nervous system
17:13 - Structure vs. function
26:08 - Role of theory
34:56 - Technology vs. understanding
38:25 - Higher cognitive function
44:35 - Adaptability, resilience, evolution
50:23 - Climate change
56:11 - Deep learning
57:12 - Dynamical systems</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/0274bcb5-8078-4a3e-9bb8-22332ac66d39-130-Eve-Marder.mp3" length="58792390"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[



Support the show to get full episodes and join the Discord community.





Eve discusses many of the lessons she has learned studying a small nervous system, the crustacean stomatogastric nervous system (STG). The STG has only about 30 neurons and its connections and neurophysiology are well-understood. Yet Eve's work has shown it functions under a remarkable diversity of conditions, and does so in a remarkable variety of ways. We discuss her work on the STG specifically, and what her work implies about trying to study much larger nervous systems, like our human brains.



The Marder Lab.Twitter: @MarderLab.Related to our conversation:Understanding Brains: Details, Intuition, and Big Data.Emerging principles governing the operation of neural networks (Eve mentions this regarding "building blocks" of neural networks).





0:00 - Intro
3:58 - Background
8:00 - Levels of ambiguity
9:47 - Stomatogastric nervous system
17:13 - Structure vs. function
26:08 - Role of theory
34:56 - Technology vs. understanding
38:25 - Higher cognitive function
44:35 - Adaptability, resilience, evolution
50:23 - Climate change
56:11 - Deep learning
57:12 - Dynamical systems]]>
                </itunes:summary>
                                                                            <itunes:duration>01:00:56</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 129 Patryk Laurent: Learning from the Real World]]>
                </title>
                <pubDate>Wed, 02 Mar 2022 16:02:10 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-129-patryk-laurent-learning-from-the-real-world</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-129-patryk-laurent-learning-from-the-real-world</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Patryk and I discuss his wide-ranging background working in both the neuroscience and AI worlds, and his resultant perspective on what's needed to move forward in AI, including some principles of brain processes that are more and less important. We also discuss his own work using some of those principles to help deep learning generalize to better capture how humans behave in and perceive the world.</p>



<ul><li><a href="https://pakl.net/">Patryk's homepage</a>.</li><li>Twitter: <a href="https://twitter.com/paklnet">@paklnet</a>.</li><li>Related papers<ul><li><a href="https://arxiv.org/abs/1607.06854">Unsupervised Learning from Continuous Video in a Scalable Predictive Recurrent Network</a>.</li></ul></li></ul>





<p>0:00 - Intro
2:22 - Patryk's background
8:37 - Importance of diverse skills
16:14 - What is intelligence?
20:34 - Important brain principles
22:36 - Learning from the real world
35:09 - Language models
42:51 - AI contribution to neuroscience
48:22 - Criteria for "real" AI
53:11 - Neuroscience for AI
1:01:20 - What can we ignore about brains?
1:11:45 - Advice to past self</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Patryk and I discuss his wide-ranging background working in both the neuroscience and AI worlds, and his resultant perspective on what's needed to move forward in AI, including some principles of brain processes that are more and less important. We also discuss his own work using some of those principles to help deep learning generalize to better capture how humans behave in and perceive the world.



Patryk's homepage.Twitter: @paklnet.Related papersUnsupervised Learning from Continuous Video in a Scalable Predictive Recurrent Network.





0:00 - Intro
2:22 - Patryk's background
8:37 - Importance of diverse skills
16:14 - What is intelligence?
20:34 - Important brain principles
22:36 - Learning from the real world
35:09 - Language models
42:51 - AI contribution to neuroscience
48:22 - Criteria for "real" AI
53:11 - Neuroscience for AI
1:01:20 - What can we ignore about brains?
1:11:45 - Advice to past self]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 129 Patryk Laurent: Learning from the Real World]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Patryk and I discuss his wide-ranging background working in both the neuroscience and AI worlds, and his resultant perspective on what's needed to move forward in AI, including some principles of brain processes that are more and less important. We also discuss his own work using some of those principles to help deep learning generalize to better capture how humans behave in and perceive the world.</p>



<ul><li><a href="https://pakl.net/">Patryk's homepage</a>.</li><li>Twitter: <a href="https://twitter.com/paklnet">@paklnet</a>.</li><li>Related papers<ul><li><a href="https://arxiv.org/abs/1607.06854">Unsupervised Learning from Continuous Video in a Scalable Predictive Recurrent Network</a>.</li></ul></li></ul>





<p>0:00 - Intro
2:22 - Patryk's background
8:37 - Importance of diverse skills
16:14 - What is intelligence?
20:34 - Important brain principles
22:36 - Learning from the real world
35:09 - Language models
42:51 - AI contribution to neuroscience
48:22 - Criteria for "real" AI
53:11 - Neuroscience for AI
1:01:20 - What can we ignore about brains?
1:11:45 - Advice to past self</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/1aac6e73-406c-4b48-84ea-d3d72b9a48d6-129-Patryk-Laurent-public.mp3" length="78083383"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Patryk and I discuss his wide-ranging background working in both the neuroscience and AI worlds, and his resultant perspective on what's needed to move forward in AI, including some principles of brain processes that are more and less important. We also discuss his own work using some of those principles to help deep learning generalize to better capture how humans behave in and perceive the world.



Patryk's homepage.Twitter: @paklnet.Related papersUnsupervised Learning from Continuous Video in a Scalable Predictive Recurrent Network.





0:00 - Intro
2:22 - Patryk's background
8:37 - Importance of diverse skills
16:14 - What is intelligence?
20:34 - Important brain principles
22:36 - Learning from the real world
35:09 - Language models
42:51 - AI contribution to neuroscience
48:22 - Criteria for "real" AI
53:11 - Neuroscience for AI
1:01:20 - What can we ignore about brains?
1:11:45 - Advice to past self]]>
                </itunes:summary>
                                                                            <itunes:duration>01:21:01</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 128 Hakwan Lau: In Consciousness We Trust]]>
                </title>
                <pubDate>Sun, 20 Feb 2022 16:44:01 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-128-hakwan-lau-in-consciousness-we-trust</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-128-hakwan-lau-in-consciousness-we-trust</link>
                                <description>
                                            <![CDATA[<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Hakwan and I discuss many of the topics in his new book, <a href="https://www.amazon.com/gp/product/B09RBB5LBW/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B09RBB5LBW&amp;linkId=04019273dfa08820df5aafdbdef49d20">In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience</a>. Hakwan describes his perceptual reality monitoring theory of consciousness, which suggests consciousness may act as a systems check between our sensory perceptions and higher cognitive functions. We also discuss his latest thoughts on mental quality space and how it relates to perceptual reality monitoring. Among many other topics, we chat about the many confounds and challenges to empirically studying consciousness, a topic featured heavily in the first half of his book. Hakwan was on a previous episode with Steve Fleming, <a href="https://braininspired.co/podcast/99/">BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness</a>.</p>





<ul><li>Hakwan's lab: <a href="https://sites.google.com/view/hakwan-lau-lab">Consciousness and Metacognition Lab</a>.</li><li>Twitter: <a href="https://twitter.com/hakwanlau">@hakwanlau</a>.</li><li>Book:<ul><li><a href="https://www.amazon.com/gp/product/B09RBB5LBW/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B09RBB5LBW&amp;linkId=04019273dfa08820df5aafdbdef49d20">In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience</a>.</li></ul></li></ul>





<p>0:00 - Intro
4:37 - In Consciousness We Trust
12:19 - Too many consciousness theories?
19:26 - Philosophy and neuroscience of consciousness
29:00 - Local vs. global theories
31:20 - Perceptual reality monitoring and GANs
42:43 - Functions of consciousness
47:17 - Mental quality space
56:44 - Cognitive maps
1:06:28 - Performance capacity confounds
1:12:28 - Blindsight
1:19:11 - Philosophy vs. empirical work</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Hakwan and I discuss many of the topics in his new book, In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience. Hakwan describes his perceptual reality monitoring theory of consciousness, which suggests consciousness may act as a systems check between our sensory perceptions and higher cognitive functions. We also discuss his latest thoughts on mental quality space and how it relates to perceptual reality monitoring. Among many other topics, we chat about the many confounds and challenges to empirically studying consciousness, a topic featured heavily in the first half of his book. Hakwan was on a previous episode with Steve Fleming, BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness.





Hakwan's lab: Consciousness and Metacognition Lab.Twitter: @hakwanlau.Book:In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience.





0:00 - Intro
4:37 - In Consciousness We Trust
12:19 - Too many consciousness theories?
19:26 - Philosophy and neuroscience of consciousness
29:00 - Local vs. global theories
31:20 - Perceptual reality monitoring and GANs
42:43 - Functions of consciousness
47:17 - Mental quality space
56:44 - Cognitive maps
1:06:28 - Performance capacity confounds
1:12:28 - Blindsight
1:19:11 - Philosophy vs. empirical work]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 128 Hakwan Lau: In Consciousness We Trust]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p class="has-text-align-center">Support the show to get full episodes and join the Discord community.</p>



<a href="https://www.patreon.com/braininspired"></a>





<p>Hakwan and I discuss many of the topics in his new book, <a href="https://www.amazon.com/gp/product/B09RBB5LBW/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B09RBB5LBW&amp;linkId=04019273dfa08820df5aafdbdef49d20">In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience</a>. Hakwan describes his perceptual reality monitoring theory of consciousness, which suggests consciousness may act as a systems check between our sensory perceptions and higher cognitive functions. We also discuss his latest thoughts on mental quality space and how it relates to perceptual reality monitoring. Among many other topics, we chat about the many confounds and challenges to empirically studying consciousness, a topic featured heavily in the first half of his book. Hakwan was on a previous episode with Steve Fleming, <a href="https://braininspired.co/podcast/99/">BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness</a>.</p>





<ul><li>Hakwan's lab: <a href="https://sites.google.com/view/hakwan-lau-lab">Consciousness and Metacognition Lab</a>.</li><li>Twitter: <a href="https://twitter.com/hakwanlau">@hakwanlau</a>.</li><li>Book:<ul><li><a href="https://www.amazon.com/gp/product/B09RBB5LBW/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B09RBB5LBW&amp;linkId=04019273dfa08820df5aafdbdef49d20">In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience</a>.</li></ul></li></ul>





<p>0:00 - Intro
4:37 - In Consciousness We Trust
12:19 - Too many consciousness theories?
19:26 - Philosophy and neuroscience of consciousness
29:00 - Local vs. global theories
31:20 - Perceptual reality monitoring and GANs
42:43 - Functions of consciousness
47:17 - Mental quality space
56:44 - Cognitive maps
1:06:28 - Performance capacity confounds
1:12:28 - Blindsight
1:19:11 - Philosophy vs. empirical work</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/87c6cd13-b8d6-4474-b2e9-42459eda761a-128-Hakwan-Lau-public.mp3" length="82548911"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Support the show to get full episodes and join the Discord community.









Hakwan and I discuss many of the topics in his new book, In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience. Hakwan describes his perceptual reality monitoring theory of consciousness, which suggests consciousness may act as a systems check between our sensory perceptions and higher cognitive functions. We also discuss his latest thoughts on mental quality space and how it relates to perceptual reality monitoring. Among many other topics, we chat about the many confounds and challenges to empirically studying consciousness, a topic featured heavily in the first half of his book. Hakwan was on a previous episode with Steve Fleming, BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness.





Hakwan's lab: Consciousness and Metacognition Lab.Twitter: @hakwanlau.Book:In Consciousness we Trust: The Cognitive Neuroscience of Subjective Experience.





0:00 - Intro
4:37 - In Consciousness We Trust
12:19 - Too many consciousness theories?
19:26 - Philosophy and neuroscience of consciousness
29:00 - Local vs. global theories
31:20 - Perceptual reality monitoring and GANs
42:43 - Functions of consciousness
47:17 - Mental quality space
56:44 - Cognitive maps
1:06:28 - Performance capacity confounds
1:12:28 - Blindsight
1:19:11 - Philosophy vs. empirical work]]>
                </itunes:summary>
                                                                            <itunes:duration>01:25:40</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 127 Tomás Ryan: Memory, Instinct, and Forgetting]]>
                </title>
                <pubDate>Thu, 10 Feb 2022 16:26:38 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-127-tomas-ryan-memory-instinct-and-forgetting</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-127-tomas-ryan-memory-instinct-and-forgetting</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/02/art-127-01.jpg" alt="" class="wp-image-1637" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img width="228" height="300" src="https://braininspired.co/wp-content/uploads/2022/02/tomas-300x394-228x300-1.jpg" alt="" class="wp-image-1638" /></div>



<p>Tomás and I discuss his research and ideas on how memories are encoded (the engram), the role of forgetting, and the overlapping mechanisms of memory and instinct. Tomás uses optogenetics and other techniques to label and control neurons involved in learning and memory, and has shown that forgotten memories can be restored by stimulating “engram cells” originally associated with the forgotten memory. This line of research has led Tomás to think forgetting might be a learning mechanism itself, an adaptation our brains make based on the predictability and affordances of the environment. His work on engrams has also led Tomás to think our instincts (ingrams) may share the same mechanism of our memories (engrams), and that memories may transition to instincts across generations. We begin by addressing Randy Gallistel’s engram ideas from the previous episode: <a href="https://braininspired.co/podcast/126/" target="_blank" rel="noreferrer noopener">BI 126 Randy Gallistel: Where Is the Engram?</a></p>



<ul><li><a href="https://ryan-lab.org/tomas-ryan/">Ryan Lab</a>.</li><li>Twitter: <a href="https://twitter.com/tjryan_77?lang=en">@TJRyan_77</a>.</li><li>Related papers<ul><li><a href="https://www.sciencedirect.com/science/article/pii/S0959438821000088">Engram cell connectivity: an evolving substrate for information storage</a>.</li><li><a href="https://pubmed.ncbi.nlm.nih.gov/35027710/">Forgetting as a form of adaptive engram cell plasticity.</a></li><li>Memory and Instinct as a Continuum of Information Storage in <a href="https://www.amazon.com/gp/product/0262043254/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262043254&amp;linkId=7104c2dad5abdbb3909d4d8e4f8dd8e0">The Cognitive Neurosciences.</a></li><li><a href="https://monoskop.org/images/2/2f/Shannon_Claude_E_1956_The_Bandwagon.pdf">The Bandwagon</a> by Claude Shannon.</li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Tomás    00:00:03    We were in the privileged position to be able to label and manipulate specific ensembles themselves in the hippocampus of awake behaving mice and in doing so, we were able to show that these specific ensembles of neurons were somehow containing, or at least are a part of the information that is allowing us to catch. I gone to specific memories. You start to think more about the value of forgetting and that forgetting maybe not so much a bug of the brain, but a feature as long as they’re made at the same stuff, then you create the opportunity for the evolution and learning processes to be continuous. Um, and in a sense, what you have is this rolling evolution convergently happening by learning evolution, learning evolution influencing one another.  </p>



<p>Speaker 0    00:01:06    This is brain inspired.  </p>



<p>Paul    00:01:20    Welcome everyone. It’s Paul. So in the last episode, I spoke with Randy gala still about his idea that the in Graham, the physical substrate of our memories must be stored within neurons in some sort of stable molecular substrate that goes against the grain of most modern neuroscience, which suggests our memories are somehow stored among the connections and structure of ensembles or networks of n...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Tomás and I discuss his research and ideas on how memories are encoded (the engram), the role of forgetting, and the overlapping mechanisms of memory and instinct. Tomás uses optogenetics and other techniques to label and control neurons involved in learning and memory, and has shown that forgotten memories can be restored by stimulating “engram cells” originally associated with the forgotten memory. This line of research has led Tomás to think forgetting might be a learning mechanism itself, an adaptation our brains make based on the predictability and affordances of the environment. His work on engrams has also led Tomás to think our instincts (ingrams) may share the same mechanism of our memories (engrams), and that memories may transition to instincts across generations. We begin by addressing Randy Gallistel’s engram ideas from the previous episode: BI 126 Randy Gallistel: Where Is the Engram?



Ryan Lab.Twitter: @TJRyan_77.Related papersEngram cell connectivity: an evolving substrate for information storage.Forgetting as a form of adaptive engram cell plasticity.Memory and Instinct as a Continuum of Information Storage in The Cognitive Neurosciences.The Bandwagon by Claude Shannon.


Transcript

Tomás    00:00:03    We were in the privileged position to be able to label and manipulate specific ensembles themselves in the hippocampus of awake behaving mice and in doing so, we were able to show that these specific ensembles of neurons were somehow containing, or at least are a part of the information that is allowing us to catch. I gone to specific memories. You start to think more about the value of forgetting and that forgetting maybe not so much a bug of the brain, but a feature as long as they’re made at the same stuff, then you create the opportunity for the evolution and learning processes to be continuous. Um, and in a sense, what you have is this rolling evolution convergently happening by learning evolution, learning evolution influencing one another.  



Speaker 0    00:01:06    This is brain inspired.  



Paul    00:01:20    Welcome everyone. It’s Paul. So in the last episode, I spoke with Randy gala still about his idea that the in Graham, the physical substrate of our memories must be stored within neurons in some sort of stable molecular substrate that goes against the grain of most modern neuroscience, which suggests our memories are somehow stored among the connections and structure of ensembles or networks of n...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 127 Tomás Ryan: Memory, Instinct, and Forgetting]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/02/art-127-01.jpg" alt="" class="wp-image-1637" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img width="228" height="300" src="https://braininspired.co/wp-content/uploads/2022/02/tomas-300x394-228x300-1.jpg" alt="" class="wp-image-1638" /></div>



<p>Tomás and I discuss his research and ideas on how memories are encoded (the engram), the role of forgetting, and the overlapping mechanisms of memory and instinct. Tomás uses optogenetics and other techniques to label and control neurons involved in learning and memory, and has shown that forgotten memories can be restored by stimulating “engram cells” originally associated with the forgotten memory. This line of research has led Tomás to think forgetting might be a learning mechanism itself, an adaptation our brains make based on the predictability and affordances of the environment. His work on engrams has also led Tomás to think our instincts (ingrams) may share the same mechanism as our memories (engrams), and that memories may transition to instincts across generations. We begin by addressing Randy Gallistel’s engram ideas from the previous episode: <a href="https://braininspired.co/podcast/126/" target="_blank" rel="noreferrer noopener">BI 126 Randy Gallistel: Where Is the Engram?</a></p>



<ul><li><a href="https://ryan-lab.org/tomas-ryan/">Ryan Lab</a>.</li><li>Twitter: <a href="https://twitter.com/tjryan_77?lang=en">@TJRyan_77</a>.</li><li>Related papers<ul><li><a href="https://www.sciencedirect.com/science/article/pii/S0959438821000088">Engram cell connectivity: an evolving substrate for information storage</a>.</li><li><a href="https://pubmed.ncbi.nlm.nih.gov/35027710/">Forgetting as a form of adaptive engram cell plasticity.</a></li><li>Memory and Instinct as a Continuum of Information Storage in <a href="https://www.amazon.com/gp/product/0262043254/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262043254&amp;linkId=7104c2dad5abdbb3909d4d8e4f8dd8e0">The Cognitive Neurosciences.</a></li><li><a href="https://monoskop.org/images/2/2f/Shannon_Claude_E_1956_The_Bandwagon.pdf">The Bandwagon</a> by Claude Shannon.</li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Tomás    00:00:03    We were in the privileged position to be able to label and manipulate specific ensembles themselves in the hippocampus of awake behaving mice and in doing so, we were able to show that these specific ensembles of neurons were somehow containing, or at least are a part of the information that is allowing us to hitchhike on to specific memories. You start to think more about the value of forgetting and that forgetting maybe not so much a bug of the brain, but a feature as long as they’re made at the same stuff, then you create the opportunity for the evolution and learning processes to be continuous. Um, and in a sense, what you have is this rolling evolution convergently happening by learning evolution, learning evolution influencing one another.  </p>



<p>Speaker 0    00:01:06    This is brain inspired.  </p>



<p>Paul    00:01:20    Welcome everyone. It’s Paul. So in the last episode, I spoke with Randy gala still about his idea that the in Graham, the physical substrate of our memories must be stored within neurons in some sort of stable molecular substrate that goes against the grain of most modern neuroscience, which suggests our memories are somehow stored among the connections and structure of ensembles or networks of neurons. Today, I speak with Tomas Ryan to talk about the modern neuroscience of engrams and his work using techniques like optogenetics to label and control specific ensembles of neurons that are known to be involved with specific learned memories. Tomas runs his lab at Trinity college, Dublin, and he and his colleagues have been able to take advantage of modern optogenetic techniques to do things like teach an animal, some new behavior. For example, teach a mouse to avoid some part of its cage, where it has learned it may get a mild foot shock.  </p>



<p>Paul    00:02:22    Then Tomas can label neurons that are involved in learning that new behavior. Then they can make the animal forget that new behavior. So for example, the mouse would no longer avoid that unpleasant part of its cage as if it has no memory of the learned behavior, then make the animal again. Remember that learned behavior in this example, again, avoiding part of its cage, and they do that just by stimulating the ensemble of neurons that were originally labeled as being involved in learning the behavior. So based on many experiments like this, Tomas has the view that the in gram, uh, is stored in the structure of ensembles of neurons. So we begin by comparing this view to Randy gala, Stolz intracellular in Graham story. And then we go on to talk about Tomasa’s other research and theories about the role of forgetting and how forgetting can be viewed as a form of learning based on the predictability and affordances and demands of the environment.  </p>



<p>Paul    00:03:25    We talk about instinct and two muscles ideas on how instinct and memories are overlapping concepts and how memories learned during our lifetime may become instincts in future generations. And we talk about plenty, more show notes are at braininspired.co/podcast/127. So since I had Randy gala still on last time as a slight background and transition from that episode, I’ll just play a short segment when Randy actually brings up Tomas and Randy didn’t know that Tomas was going to be on this episode. So I’ll play it since we begin this episode with their differing views on the, in Graham enjoy,  </p>



<p>    00:04:07    Don’t ask Ryan, uh,  </p>



<p>Paul    00:04:09    He’ll be, he’ll be on the next episode.  </p>



<p>Randy    00:04:12    I can follow up on this or you can ask him what’s his problem with Randy story? Yes, because he and I have been arguing in correspond. Okay. So he, and I agree that the information isn’t stored in this and that in the plastic sentences, and he admits that he does not have a story about how the information is stored. These all focused on these cell assemblers, he’s focused on this sparse coding. And I say, Tomas, that’s all very interesting, but we both think that the real name of the game is looking for the end brand and those cell assemblies. They aren’t, they haven’t read your own work, shows that it must be inside those cells.  </p>



<p>Paul    00:04:56    So Randy said, ask him what his ask him. What’s his problem with my story about, uh, the in gram and it being intracellular, um, uh, sub you know, molecular processes in coding numbers in order to compute. So, Tomas, I guess I’ll start off by asking you, uh, how you would respond to what’s your problem with Randy’s story.  </p>



<p>Tomás    00:05:22    We all want to understand how memory is stored in the brain and the change in the brain that is caused by learning which stores a particular piece of information we call a memory and grounds. Rondi insists that the Ingram must be molecular, that it must be intracellular, and it’s here where computations are happening. The basic problem that I see with any account of memory, which insists that information must be stored at a molecular level within neurons, whether that’s some kind of protein structure, whether that’s some kind of code in polynucleotide chains, is that the brain would have to know which cells to find that molecule in. Right? So we know that sparsity is a fact. We know that when we have a particular experience, that particular cells are being activated. And we know that for given memory, we have a sub portion of cells distributed across many brain regions, which seem to encode that memory and ground. So if you’re going to say that the information is in a molecule within the cell, you have to explain how the brain knows how to find that particular cell in the first place, and not necessarily requires a mnemonic mechanism for storing memory, which is not occurring at a subsidiary level.  </p>



<p>Paul    00:06:53    So, you know, he tacks on to his story that you also need an index, essentially, a pointer, right? Uh, so you not only need to store a number, you need to store an index. So I guess his, his take on that would be, that’s also in coded those addresses  </p>



<p>Tomás    00:07:08    Gala style. He insists on there being a Von Neumann type structure and how the brain does computation and that what we need to be finding therefore, is some kind of Shannon information. And he insists that Shannon information in his words is the only game in town. Uh, and I completely accept that Shannon information is the only game in town when we’re talking about information theory in the context of computer science and engineering, at least it’s the only game in town so far, but when we’re talking about biology, we have to be open to other kinds of information storage. Uh, one of those we already know a great deal about which is genetic information, which is a kind of information which existed obviously for billions of years before we did, uh, which is not encoded in any kind of Shannon forum is not computed in any kind of Von Neumann architecture and which we had to discover.  </p>



<p>Tomás    00:08:15    We didn’t invent it. We had to discover it with theoretical insights, but primarily with a progressive, empirical, uh, discoveries and experiments that allowed us to have that kind of information, uh, revealed to us through science. And that is a kind of information that we know is, is, is just not like Shannon information. And I think which is what is particularly apt here is that cloud Shannon did write a very short piece once called the bandwagon where he was trying to illustrate that information theory as being conceived was conflating in some respects to different things. On the one hand, it’s about communication of information, which is what Shannon information is for it’s for a faithful communication of categorical information with, with total error reduction, from, uh, a source to a destination. Um, it does not contain any, a preauthorized semantic information, semantic information needs to be written into the Shannon information method of, of information transmission.  </p>



<p>Tomás    00:09:27    When we’re talking about the genetic code, we’re talking about a type of semantic information that is embedded by some kind of bio semiotic structure into DNA. And when we’re thinking about memory, uh, we’re going to be dealing with very different substrates. We’re going to be dealing with something happening at a neuro-biological level, obviously. Um, but that type of information is likely to be something that is currently unknown currently undiscovered. And there’s no a priori rationale for imposing Shannon information structures on the brain in that respect. And there’s nothing wrong with looking for them. There’s nothing wrong with looking for information in the brain that may be informed, uh, by concepts that we have from, from computer science. I mean, that’s useful. Um, and it may or may not be partly true where I take issue with, with gala style’s perspective is that he insists that this is the only way and that therefore the brain must, uh, must conform to that. And we just know from genetics, that’s just not true. There are other ways of storing information in biology.  </p>



<p>Paul    00:10:45    You know, I have to be honest, you know, on, on one hand, uh, I like Randy’s story because it is intuitively appealing, right? Given what we know about how a Von Neumann computers work and what we have discovered, you know, about and, and invented about, uh, computation. Um, on the other hand, like you say, I mean, brains evolved. We didn’t design them. So w w it’s something that we have to discover and learn about instead of, so it’s interesting to think about using a product of our own engineering to then, then look back and say, that must be how it’s happening in the brain in some sense. So it’s an it’s intuitively appealing. It’s also kind of simple, right? To think, well, it must just be there. The numbers must be there in, in the neurons. Uh, but then I also, I think it’s a much more interesting story if it’s not done that way, if it’s an in, if memories are encoded somehow, you know, among the connections, even if we don’t have a clean story about how that’s done yet. So it’s, it’s just a much more fun story to think about, uh, how brains might be doing this in a  way.  </p>



<p>Tomás    00:11:51    Biology is mess is messy. And the way that gala style thinks about this seems to be very much influenced by fedora and notions of language of thought, which essentially says we have to have computation in the brain, which, which we do, and that therefore it must be numbers all the way down. And what Randy is saying when he’s challenging conventional neuroscience, is that animals, including rats and mice clearly have concepts of numbers. If they have concepts of time. And therefore we must be able to find those numbers in the brain. If we are to really say that we have a full explanation of memory. Um, now on that proposition, I agree. I think that if we do have a full explanation of memory, we will have to be able to say how an animal or a human is able to have a concept of a particular integer number, uh, in its brain.  </p>



<p>Tomás    00:12:47    But that does not mean that that information needs to be stored at the basic end ground level in a numerical digital format. And one way of illustrating this is to think about other ways that numbers are represented in biology. Um, and one very obvious way is the number of bones that we have in, in our skeletons or the number of vertebrae that we have in our back humans have about 33 vertebrate in our backs. Mice have about 28. I think elephants have about 60. All humans have this number of vertebrae. You will not find that number in our genome, even though it is genetically encoded, even though we are genetically determined to have 33 vertebrae, you will not find 33 anywhere in our genome. What happens is there is an analog to digital rather than analog to integer transition, where you have a body plan with a gradient of morphogen, which works with a fixed number of Hawks patterning genes to create.  </p>



<p>Tomás    00:13:55    So mites segments during a segmentation phase of early development, and they form, um, integers things which are our segments, which then become our vertebrae partly, um, in a very consistent genetic encoded way that is only meaningful at a developmentally expressed level. In the context of a developing organism, the same kind of logic can be true of any other way of storing integer quantities or temporal information in the brain where you start with an continuous gradient, um, that has thresholds imposed on it, according to certain constraints, which result in integers, which can then represent absolute numbers. And I don’t see anything controversial about that. I think that’s just a basic feature of developmental biology. And I also think that there’s an awful lot to learn about the coding of information from the study of innate information in itself and button by that. I mean, instinct. So if we are to under, if you were to really get a handle on how information is coded in the brain, whatever your favorite mechanism is, whether you’re concerned with it at a systems, physiological level, a synaptic level, or at a molecular level, I think it’s really important to consider the overlap of instincts and memories.  </p>



<p>Tomás    00:15:27    And it seems to me that by understanding the minimal essential components of memory and grounds, and in nature, what I would call in grams, we can get a handle on the minimal components, minimal biological components of latent states of long-term information, uh, in the brain. And so when Randy gala style is proposing very strongly, that information must be stored at a molecular level in engram cells, rather than there are in ground molecules. I think he also needs to explain how innate information is coded for at that level at that kind of molecular level. And I asked him that question once I said, okay, so we have instincts too. And he said, yes, of course we have instincts. I mean, there are many definitions of instincts. Um, but taking the view of an instinct that is holding for information about environmental affordances, that, that are relevant to you.  </p>



<p>Tomás    00:16:28    So, you know, w we have nine or 13 or so different definitions of instinct, depending on who you read. It’s not just enough for something to be innate, to be an instinct. So a central pattern generator, the fact that I can breathe is innate. It doesn’t necessarily mean it is an instinct. Um, I would say an instinct is something that is in your brain that is genetically determined. That is developmentally constructed. There may be some plasticity in how that is constructed, but what we know what we mean, it means that we have knowledge about our environment, about the environment. We evolve them to a certain extent. Um, I post a Randy that you need to be able to explain how an instinct is coded for, uh, in the same language, in the same biological language as to how the memory is coded for. And Randy said, well, that’s very straightforward.  </p>



<p>Tomás    00:17:23    It’s coded for the DNA. Um, but that’s really not enough of an answer it’s not really coded for in the DNA because DNA and isolation, genes and isolation will never produce a brain structure. It happens because of an unfolding of nested information patterns that are developmentally realized that can be described as kind of a growth of complexity that in that brain structure being, uh, being formed. And it seems to me that at a molecular level, it would not really make sense to be storing instincts in that respect. I think we, we accept sort of tacitly that instincts are stored in the anatomy of the brain and it’s worth pointing out that we don’t study indistinct yet as much as we’ve studied memory. I mean, there’s far more work being done on the neuroscience of memory than there has been on the neuroscience of instinct. And that’s partly because that’s a throw over from experimental psychology, which traditionally focused more on learning than on an ape behavior for most of the century.  </p>



<p>Tomás    00:18:35    Uh, but there is a lot of work still on the neuro-biological neurobiology of, of instinct. And most people would, would agree that it’s a reasonable, it’s a reasonable viewpoint. That instincts are informational patterns that allow us to compute how to, how to realize adoptive behavior in the moment through our, our brain structures through the way our ser or brain circuitry is structured at both the macro and micro level. And we’re still working out the details of course, of different instincts. And we don’t understand, uh, all of our instincts, uh, but it, it seems to be, seems to be realized and encoded at a systems level. Um, I don’t think that there is a good framework for thinking about innate information that is innate instinctual information about how to behave in your environment, uh, at an interest center level. I’ve never seen any evidence or, or even, uh, a case made for that.  </p>



<p>Tomás    00:19:37    Uh, so I think that’s another problem with the, with the view that memory would be interested in her, but one of the major problems that I have is, is the, is the kinetics of this, or rather not the kinetics, but the, but the temporal dynamics, I suppose I should say. So one of the reasons why Randy has challenged the traditional synaptic plasticity and memory field, uh, for so long is because he observed that it did not follow the temporal conditions of associative learning or what we call associative learning. Um, and that the heavy and mechanisms that we were observing for inducing, synaptic, plasticity, LTP, and even spike timing dependent plasticity didn’t really parallel what we were seeing at a behavioral level. And his view was that the temporal constraints need to be grounded in the facts of experimental psychology in behavior, which makes complete sense.  </p>



<p>Tomás    00:20:39    And I agree with that. I have agreed with that for many years. Uh, the problem is that when you start going into specific molecular, that he’s particularly focused on, which are intracellular, which involve DNA coding, which involve the shifting of polynucleotide chains in a way of storing information, but also retrieving that information. It isn’t biologically plausible in my view for that information to be recalled at the timescales that we require it. So we know that memories can last a really long time that you can then summon them, uh, in an instant you can summon them in seconds or less when you’re really motivated to, or when you just have a random thought for a molecule to be expressed, uh, in a nuclear way that has a manifest effect on the electrophysiological properties of that cell to then affect behavior. I mean, you’re, you’re talking about minutes to hours for that kind of operation to happen based on even the most generous, uh, mech most generous understanding of, of how we can understand cellular molecular processes.  </p>



<p>Tomás    00:21:50    Exactly. So it’s, it’s about speed. Um, and so I don’t consider it realistic. So my own view, which is, I don’t think a very radical view is that memory is not stored, uh, an intracellular level, but where I agree with Randy is that it’s also probably not stored at the level of synaptic weights. And by that, I mean, at the level of the strength of individuals synopsis or, or individual populations of, of synopsis, and that rather it is plasticity of synaptic wiring that creates new sub anatomical micro anatomical circuits that didn’t exist before, which allow the brain to store information in a way that is stable, really embedded in the structure of the brain. And that can be rapidly accessed, uh, whenever required and, and kept and kept there in essentially a state that requires very little energy, very little, uh, thermodynamic effort, because you don’t need to keep synaptic weights at a certain, uh, strength.  </p>



<p>Tomás    00:23:00    And it also reduces error reduction in that sense, because what you’re concerned with is the wiring of synaptic connections. Do you have a pathway between a particular set of nodes? And if you do, then you have that information, which can be agnostic to the actual strengths of the different connections between those nodes. So you can have drift of synaptic weight either as an effect of further experience or because of the synaptic homeostasis or a synaptic elasticity, which we know exists, which is when the set point of a particular set of signups is goes down to a previously existing synaptic weight following a previous, um, experience. Um, and if we remove the magnitude of synaptic weight as being a crucial feature of information storage, what we’re left with is I think a much more stable, reliable, durable way of storing information. And importantly, one that is congruent with how the brain is likely storing innate information. Um, that is instinct.  </p>



<p>Paul    00:24:15    Okay. So I’m going to ask you about optogenetics here, but one more question before that scientists are people, right. And we are, uh, we cling to our ideas and it’s really hard for us to change our minds about anything. Have you, um, can you remember a time when scientifically your mind has been changed where you thought one thing and, um, evidence convinced you that it was another thing, because I’m not sure that so Randy certainly is never going to change his mind. I don’t believe, uh, about his thoughts on the way that memory works. And, you know, this is beyond just memory. It’s more of a sociological question, I suppose, about science. Do do any of us really ever change our minds?  </p>



<p>Tomás    00:24:59    I think it’s really important to be able to change your mind. Um, but it happens very slowly for things that are a matter of, of theory or opinion. I mean, the way science works is that it is slightly adversarial, but it’s grounded in data. And when somebody eventually does an experiment, uh, we must pay attention to that. And we eventually, we have to admit that a particular fact or a particular thing is a certain way. I think people are more difficult to persuade that they need to change their minds when we’re talking about theory and not, and not facts. And, you know, I often tell my students that science is theory based not fact-based. Um, we can’t get a full picture of the world, certainly not a full picture of the brain just by stitching together facts that we discover in the lab. We have to have a coherent theory.  </p>



<p>Tomás    00:25:56    The theory has to have, uh, the capacity to make predictions and for us to form questions. And we design experiments in order to test a theory in a sense, the data that we produce in experiments are tools in a sense to test the theory, to refine the theory and to assess the predictive value of a theory. Um, most of the science that we do does not really challenge or inform theories in a very large sense. We tend to evolve our theories in a much more gradually listic way, uh, working as a, as a community. Um, and I learned this very much from people like reading, reading people like Daniel Dennett, uh, where he takes a very, very subtle, uh, slow approach for trying to persuade people that they’re the received wisdom on something may be incorrect. So you asked me when was my mind change, I suppose that I’ve been a Cartesian dualist for much of my life and I’m not a religious person.  </p>



<p>Tomás    00:27:06    Uh, I think I left, uh, organized religion when I was about 12 years of age, but I got into neuroscience in my twenties, and I think I was a Cartesian dualist right up until I was about 28, 29. And I was resisting any notion that there may be nothing magical about the mind, because I was still hanging on to this idea that many of us seem to have that there’s some kind of, not a homonculus, but some kind of driver in my brain that I was really in control of everything that was going on. And when through reading Daniel Dennett, I really got the slowly was brought towards the perspective that was based on facts that I was already aware of, but I had never been arranged in, in this way that I realized that that was wrong. And that what I was calling consciousness was really some kind of, as, as Dennis says, a user illusion.  </p>



<p>Tomás    00:28:03    And why would I was so impressed by was the lack of ostentatiousness and how he did this, because he didn’t take a dramatic polemic, uh, combative position with respect to this. You know, he, he created a picture. He was very good at he’s very good at meeting the reader where they are, uh, he communicates crucial insights and creates a picture. And I think that that is a very productive and, and civilized way of, of changing people’s minds. And I hope I try and bring that into my own teaching with respect to my own science. Um, I got into neuroscience because I was really fascinated with an idea that I think has since been shown to be not entirely accurate. So I was originally a geneticist. I was a genetics undergrad, and I was getting quite bored of the way, uh, genetics had gone. So, so I, I majored in genetics because I really wanted to understand, uh, what we knew about genomics and how we evolved as a species.  </p>



<p>Tomás    00:29:06    And by the time I got into my sophomore year, I became quite, quite bored of what was going on. I mean, it seemed to me that we had answered so many of the questions at least that I wanted to answer. Um, and I was wondering what I would do, um, with my, with my life and what I would, I become a scientist. And then I went to one lecture by hydrocephalus memory geneticists called money Ramaswamy, who was explaining, uh, to us at the Trinity college Dublin, how short-term memories were encoded as covalent changes in proteins that supported synaptic plasticity. Whereas long-term memories were encoded through gene expression, cascades that would result in late phase long-term potentiation and permanent synaptic way changes. And I was immediately fascinated by this idea. And that day, I think I became a neuroscientist because I just thought it was the most amazing thing that a genetic switch would decide how you go from a short-term memory to, to a long-term memory.  </p>



<p>Tomás    00:30:10    And so I got into neuroscience and I had a PhD experience that was quite, um, was extremely valuable to me at Cambridge. And it was in my post-doc at MIT when I was actually working on em and Graham cells, where we started to test some of those ideas that were part of the bedrock of the memory field. There was always a problem with the idea that long-term memories would require gene expression changes. Um, in some of this was illustrated by Rhonda gala style’s work and others, because the temporal conditions of this would re would require hours to happen. And of course there was a stop gap met, you know, placeholder in that theory where you would have a short term synaptic correlate that would lead to a long-term Snapchat correlation. It was very much focused on the molecular and cell biology of long-term potentiation. Um, and that if we simply understood the idea was really, if we simply understood the molecular and cellular biology of long-term potentiation, we would be able to explain how synaptic weight plasticity was maintained indefinitely.  </p>



<p>Tomás    00:31:23    And therefore we would understand memory and through reading more and more behavioral neuroscience and becoming more aware of experimental psychology. I started to see the problems with this actual, with some of the apprentices on which we were basing that, um, one was that we were avoiding the issue of information storage because we didn’t have the technology to study particular memory on grams in a functional way. At the time what we were doing was we were studying the cellular biology and the synaptic physiology of the process of learning and the process of memory maintenance, the process of memory retrieval. And reconsolidation, uh, the problem was that we were not really understanding how specific memories were coded, how my memory of having coffee with you is very different from my memory of meeting a student today, even though they were going to both types, both memories are going to be involving the same brain regions, of course, and would involve the same biological processes.  </p>



<p>Tomás    00:32:22    The point is that we’re also studying separate engrams, separate memories using the same biology, just as different genes are stored, are coded in exactly the same material. All of our genes are coded for more or less in, in DNA. So the same molecular mechanism, but completely different pieces of information. So engram labeling technology, um, which came into its own in 2010, 2012 because of work in the Tonegawa lab. And before that, uh, to a certain extent in the Josselyn lab, um, in Toronto, and of course the Tonegawa lab, we were, we were building on the tools created by Mark Mayford in San Diego for immediate early gene transgenics, and also very much on optogenetics. Um, but we were in the privileged position to be able to label and manipulate specific ensembles themselves in the hippocampus of awake behaving mice and in doing so, we were able to show that these specific ensembles of neurons were somehow containing, or at least are a part of the information that is allowing us to hitchhike on to specific memories.  </p>



<p>Tomás    00:33:35    And using this technology, we can elicit artificial recall of specific, naturally formed contextual memories in the mouse, and we can also inhibit them. And since then, this technology has been applied to many different behavioral paradigms, um, and in many different brain regions. And one early experiment that we did was to ask whether memories would survive in an engram if we disrupted late phase long-term potentiation, or if we disrupted rather memory consolidation in general, which includes a set of gene expression, dependent biological processes, which are required for a number of things, including the maintenance of strengthened synaptic weight. And when we did this experiment, we were able to see that we were able to abolish as far as we could measure changes in synaptic weight that were specific to engram cells, both measured by the strength of excitatory postsynaptic currents, but also the density of postsynaptic dendritic spines in engram cells. And other people have reproduced these findings. But what we found was that when we optogenetically stimulated, those cells, that the information seemed to survive in a number of different behavioral conditions. So in other words, the learned information was still being carried in those engram cells, despite the fact that that information was not being accessed by natural retrieval cues. We got a lot of pushback from the community at this time.  </p>



<p>Paul    00:35:10    What was the nature of the pushback?  </p>



<p>Tomás    00:35:12    Well, people don’t people approach this finding with the degree of skepticism that it deserved. You know, we had a long time in peer review. We had a long time persuading people at conferences, of course, and it’s completely appropriate.  </p>



<p>Paul    00:35:26    What was the, what was, what was it, um, that they don’t like the story of, you know, some sparse representation of in gram cells and you can just zap them and, and the mouse, you know, recalls the, um, the contextual behavior or they didn’t, what part didn’t they buy or was it all day?  </p>



<p>Tomás    00:35:44    I think it depends on who you talk to. I mean, I think there were probably three broad schools of neuroscientist, um, that were interested in, in this kind of result. Uh, on the one hand you had the molecular neuroscientists on the other extreme, you had the systems neuroscientists and in between you had what I would call the synaptic plasticity and memory people. Um, and they all would have approached it with slightly different, I think, attitudes, um, the molecular neuroscientists and I was previously a molecular neuroscientist, um, are working from a molecules first approach. You know, they’re working on NMDA receptor biology, or they’re working on PKMzeta, or they’re working on prions, they’re working on some kind of molecular pathway that generally speaking works for a number of different, uh, physiological phenotypes, but tends to, they tend to focus on synaptic weight changes. Then they tend to focus on LTP, um, and their goal is to contribute to the molecular understanding of memory.  </p>



<p>Tomás    00:36:52    Um, so there’s a, there was an immediate response there because they wanted to know, well, if not this molecule, then what is the molecule that’s storing memory on the other extreme, you had the in vivo physiologists people who are studying how ensembles of cells are functioning in awake, behaving animals, going back to the discovery of place fields and everything since then. And there’s a justifiable reaction to engram biology from that quarter of neuroscience. Uh, and the reason being is that they’ve spent decades listening in very carefully on how the piano of the brain and the PA how the engram is actually played in a very, uh, delicate and specific symphony. And then we came in with a set of hammers and started banging the piano keys, making quite strong statements, and then walking off. And that’s obviously going to get some people’s backs up to a certain extent.  </p>



<p>Tomás    00:37:57    Um, we were using optogenetics to turn on and on turn on and off ensembles of cells that don’t, uh, spike at the same time in vivo. And we were doing it at unnatural frequencies and until holography becomes so sophisticated and do a bake witness, that everyone can do it in efficient and inefficient way. We’re going to have to continue, uh, using these kinds of methods. And we accept those limitations entirely. But we think that doing these kinds of experiments at least allows us to ask certain questions that give us reasonably clear to, to a certain extent, particularly when they’re loss of function, but even when they’re gain of function. Because even though we activate a particular engram at 20 Hertz and all the engram cells in that brain region are spiking simultaneously. Um, the downstream circuitry we hope is going to take over to a certain degree of, uh, normality  </p>



<p>Paul    00:38:59    The dynamics of the sequence and dynamics of the firing. Yeah.  </p>



<p>Tomás    00:39:03    Yes, exactly. And in other experiments, of course, you don’t need to do the behavior in real time. You can stimulate to create an association and then you can look at natural behavior afterwards. And of course there, the real, I think a benefit of engram technology as, as we do it, is that we don’t interfere with learning at all. And that the learning is done in a completely natural way. And we allow the brain to show us where the cells are. We don’t engineer anything. There’s nothing  </p>



<p>Paul    00:39:31    Just to like, pause on that for one moment. So, so you teach a mouse, let’s say, or a rodent, um, a task or, or a fear conditioning paradigm, and you have used transgenics for, uh, so that the, the neurons, the engram cells, quote unquote, the neuron, the engram cells, uh, that are involved in that learning process start to, um, express these early genes, right? For, um, that, that are involved in plasticity and, and the transgenics then can basically label those very specific cells. Uh, and then that’s when you can start manipulating.  </p>



<p>Tomás    00:40:07    So we’re, we’re hijacking immediate early genes, and immediate early genes have been studied for many years by people like Mike Greenberg and Mark Mayford engineered a particular transgenic mouse. Now there are other kinds of mice that we can use, but his mouse co-opted the c-fos promoter, which is one gene whose expression is a function of neuronal activity. And then we can use this gene to activate and excuse me, to label the naturally activated cells in a particular, particular learning experience. And we can control the labeling of the cells using exogenous constructs that are dependent on antibiotic administration or on Tamoxifen administration. But, but putting the technical details aside the point is that we can label cells that were naturally formed during learning, or engram cells that were naturally formed during learning. Um, and so that the methodology is not perfect when it comes to activating these cells.  </p>



<p>Tomás    00:41:05    Uh, but they do give us, um, a functional analysis of engram cells in a way that can be controlled. I think, to the standards of, uh, any standard behavioral neuroscience experiment or with respect to how people responded to it. I think in between there was this synaptic plasticity and memory field who for a long time, um, were working on the premise that the kind of plasticity that we must be observing, uh, in order to fit in with the idea of Hebbian synaptic plasticity, as a mechanism for memory storage would be what we call long-term potentiation, um, synaptic potentiation, which can be caused by high-frequency trains, uh, in vitro or in vivo, uh, or, or also by spike timing, dependent plasticity induction procedures, which are, we think more, more natural. And that these would be ways of storing information in the strength of connections between neurons that were already connected.  </p>



<p>Tomás    00:42:07    Uh, but if we step back for a second and we look more broadly at the kinds of synaptic plasticity that can happen, um, there’s a very nice review that was written by Carlos Kubota and others. Um, I think in 2004, which very nicely separated out the idea of synaptic wiring versus synaptic weight. And there are two different kinds of plasticity that often occur at the same time. And because they occur at the same time, they can be experimentally difficult to disentangle. So plasticity of synaptic weight, um, refers to the strength of the connections and plasticity of synaptic wiring means you’re changing the connectome, okay, you’re changing the connectome, you’re forming new connections in your lattice that were not there before the learning experience. And what adds to the confusion is that both types of synaptic plasticity can be induced by two different types of biological plasticity.  </p>



<p>Tomás    00:43:08    One would be, um, the structural plasticity of dendritic structures, um, and the other being what most people would refer to simply as synaptic plasticity, uh, which means the, in the simplest case, the addition of AMPA receptors, uh, to synaptic, to postsynaptic sites, you could also include pre synaptic plasticity. Um, and the reason that both types of biological plasticity can contribute to both kinds of information plasticity is that when you consider plasticity of synaptic weight, obviously that can be increased by trafficking AMPA receptors to the synapse. And that’s the simplest way we think about it, but it also can be triggered by structural plasticity, because if you have a population of neurons or a population of neurons that are all connected to a post synaptic set of neurons, if you increase structural plasticity, by having more dendritic spines on the post synaptic side, it’s technically a structural plasticity, but really you’re just increasing effective synaptic weight.  </p>



<p>Tomás    00:44:12    You’re not really changing the synaptic wiring because they’re already connected. Then you move over to the informational plasticity of synaptic wiring that can be mediated both by structural plasticity, obviously, because if you have slight axonal movements, or if you have dendritic movement or new dendritic spines, or new dendritic structural plasticity, you can form new connections between neurons that were not there before. And so you change the connectome, but you can also have plasticity of synaptic wiring due to basic intracellularly driven synaptic plasticity. When you consider silent synapses. So silent synapses are NMDA receptor-only synaptic connections that are existing, but they’re, non-functional because they have no AMPA receptors. And you can imagine how, in certain cases, these become silent or excuse me, become unsilent. And we know about this, um, based on different experiences. So in that sense, a very simple biological event of transporting AMPA receptors to the post synaptic site would not structurally change the connectome, but functionally changed the connectome because it could create a new pathway that was there, but never really active.  </p>



<p>Tomás    00:45:22    But the point is that when we’re considering the plasticity of synaptic wiring, this is a kind of plasticity that seems to survive, uh, the disruption of late phase synaptic potentiation. And it seems to survive with the memory in various cases of amnesia. And this wasn’t just done in the Tonegawa lab, as these kinds of experiments have been reproduced, uh, at the Denny lab at Columbia and the Frankland lab in Toronto, in my own lab at Trinity College Dublin, also in Japan, um, and, and in Germany, and more and more, what we’re finding is that in different cases of amnesia, the memory survives despite the fact of amnesia and that the connectivity between engram cells survives and my working hypothesis, I suppose, is that it’s the plasticity of synaptic wiring initiated by learning that is allowing us to store information in a longterm and stable state while the plasticity of synaptic weight, which doesn’t alter the connectome, but of course occurs even in new synaptic contexts.  </p>



<p>Tomás    00:46:36    So if you form new, if you make new synaptic connections during learning those, those synapses are also going to be potentiated. This is what I mean by they’re happening at the same time, which, which causes some confusion, but that plasticity of synaptic weight, uh, I agree with, with Gallistel, is probably not storing information. It’s probably quite important for learning because learning happens often over minutes or even hours, depending on the episode, but plasticity of synaptic weight happens really quickly. So surely if you disrupt that you’re going to disrupt learning. It’s also possibly involved in recall one hypothesis. Is that the reason that we can’t recall memories in different cases of amnesia is because of the disrupted synaptic weight strength, but that’s a hypothesis that hasn’t been proven.  </p>



<p>Paul    00:47:24    That’s your current thinking as well, right? That we have all these latent, uh, inaccessible, unrecallable, um, naturally unrecoverable memories from learning.  </p>



<p>Tomás    00:47:35    I used to. Yeah. I used to think that, but that’s something I’ve changed my mind on also in the past, in the past few years, um, because what we know about engram cells, even in cases of, of amnesia is that the cells are still functional, right? It’s not like the cells are silenced. Um, it’s not like they’re, they’re, it’s not like they’re hyper inhibited or anything like that. It’s just that they’re not, uh, statistically activated enough during what we consider to be a targeted recall session for, for memory recall to occur, but they’re still, they’re still functional. They can still code for other information. Um, so why is it that they’re not being activated? It seems to me that what’s happening when we cause amnesia, whether we cause amnesia by some kind of experimental amnesia such as anisomycin, which is a drug that blocks memory consolidation by inhibiting protein synthesis, or if we have pathological cases of amnesia such as traumatic brain injury or early Alzheimer’s disease that we’re causing this reversible inaccessibility of memory, but that this type of thing that we’re doing, may be actually co-opting a natural process, a natural process of, of forgetting.  </p>



<p>Tomás    00:49:00    Um, and that there may be something more general going on here. And I think that we’ve been limiting ourselves when we think about memory accessibility in, in a very binary way, like it’s either storage or it’s either retrieval. Once we start getting into binary, um, binary perspectives, we start getting very oppositional, uh, very categorical and sooner or later everyone is wrong and things can get a little bit unfriendly. Uh, at times when we start thinking more about it being a continuum of accessibility, uh, we start to open up new ways. I think of considering the function of what I would call differential memory expression. Uh, when we say accessibility, it’s kind of a loaded term because we’re saying it’s more difficult to access that engram, but actually maybe the engram is accessible, but it’s just not as inclined to be as expressed under, under those conditions.  </p>



<p>Tomás    00:50:01    And so Paul Frankland and I really recently wrote a perspectives piece on this, which we worked on for quite a long time, which was to try and put together, uh, all of literature that we could on the general topic of forgetting and forgetting in the broad sense. So including amnesia, which would be considered by some a kind of forgetting, or kind of pathological memory loss, depending on how you want to look at it. And when you consider forgetting as the loss of information that’s already been learned, um, it’s, you know, we, we’ve known about it for over a hundred years. It’s been characterized by experimental psychologists for over a hundred years. We tend to consider forgetting as a nuisance. We tend to accept it as a kind of an unavoidable consequence of having imperfect biological brains. And wouldn’t it be wonderful if we could not forget anything? And if we could all be super mnemonists like the, like what Luria was studying, uh, but then you start to think more about the value of forgetting and that forgetting maybe not so much a bug of the brain, but a feature of the brain.  </p>



<p>Tomás    00:51:15    And this is particularly pertinent as we get more and more interested in brain inspired AI, um, in recent times. And the idea that memory that the forgetting of memory may be adaptive was put forward a few years ago by Blake Richards and Paul Frankland, who argued that forgetting allows us to have a greater amount of behavioral flexibility because it avoids, uh, long-term memories from overfitting to noisy environments. Because if we over fit to noisy environments, it’s harder to learn anything new. It’s harder to make predictions. And this would be really maladaptive for vena life. It would be terrible for things like foraging, uh, or for, or for further learning. And that generalization in itself may be considered a kind of a form of, of partial forgetting. Okay. That makes sense that that makes sense at an ecological level, and it makes sense, uh, at, at, for, uh, artificial intelligence to, to a certain extent, but there’s a cost there.  </p>



<p>Tomás    00:52:18    And the cost is that you’re losing that information, which may be valuable to you in the future. Uh, but that’s where the reversibility of memory loss, um, allows us to circumvent that costs because if the memory is recoverable under certain circumstances, uh, then you, the brain can potentially have it both ways in that we can forget what we need to forget in a way that is driven by the predictability of the environment, but that does not mean the information will be unrecoverable in future as, as conditions change. And this was the core idea on which we were trying to build, uh, that perspective piece while we were, we were gradually changing our mind as we were writing. Hmm.  </p>



<p>Paul    00:53:03    So, so on that account, um, and by the way, I like this idea. I like that. I like that paper on that account, as you partially forget, right? Like you were, like you were saying, um, it leads to better generalization, but then your idea is that you have all these latent, uh, eventually recallable memories still within that, because on the earlier account, you permanently lose those quote-unquote memories as you get better at generalization, but you want to say that, uh, that that’s not necessary, that you can have these latent structures that are still there and recallable when needed.  </p>



<p>Tomás    00:53:41    Exactly. I mean, I think that all engrams are essentially latent structures because the, the engram has to retain information, uh, for years and potentially decades of that memory, not necessarily being activated. When we say that we observe forgetting, or we experience forgetting it’s based on an expectation about how a second order expectation about how we should behave or how a subject should behave under certain conditions. But the subject’s brain may have a different idea of what is the most adaptive computation to make. And this is what is already well accepted in the memory extinction literature. So in the experimental psychology of memory and in the neuroscience of memory, we’ve known about how memory can change, not just through reconsolidation, um, which is a relatively recently characterized phenomenon within the last 20 years, but going back even further, we’ve had memory extinction, counterconditioning, interference, the Kamin blocking effect and so on, and these are all associative, uh, changes to memories that have been formed.  </p>



<p>Tomás    00:54:57    And then you change the contingency so that they’re expressed under different, under different circumstances. Now, Randy Gallistel has rightly criticized me for using the term inhibitory memory in that perspectives piece. And he’s pointed out that extinction is not the inhibition of a previously formed memory. Uh, I know that, uh, it’s just a convention just as we conventionally describe these memories as associative memories from a pragmatic, a descriptive sense, even though it doesn’t necessarily mean associations are happening in the brain. We know extinction changes the expectancies for a Pavlovian association or an operant association, and that this changes how the animal behaves. It doesn’t mean the engram is being repressed. It means the engram is being differentially expressed or differentially used. Um, and it seems to me that the process of memory extinction, which is generally considered to be a form of learning and very loosely described as a form of memory, updating might be the same thing that is happening in natural forgetting.  </p>



<p>Tomás    00:56:05    And there’s a number of reasons to think this one is based on the biology of natural forgetting we’ve known for about 10 years or more. Now that it’s an active process. And if you disrupt certain biological signaling cascades that involve molecules like Rac1, then you don’t get natural forgetting, uh, across the timescales that are generally studied in animal models. So this was always a puzzle that forgetting required activity. It wasn’t actually a passive process. And then it turned out that some of the processes that were involved in active forgetting were actually initiated by neuromodulators, such as dopamine, and that this feedback was at some level perceptually generated. And the dopaminergic activity was triggering natural forgetting, uh, of memories in both flies and in mouse models. And then Yi Zhong’s lab, who is an expert on natural forgetting, uh, mainly in flies, but also in rodents.  </p>



<p>Tomás    00:57:06    He then used engram technology to show that under certain circumstances, uh, naturally forgotten memories could be retrieved, uh, in the brain of the rodent. So more and more, it seems to me, and partly because the rate of natural forgetting and the kind of natural forgetting that you have is a function of your environmental experience. It’s a function of environmental predictability. I mean, you tend to remember things that are more valuable to you in your environment. So if you’re very hungry, you’ll tend to remember memories that are associated with where to find food. If you are socially isolated, you tend to forget social memories. These are all things that are documented in rigorous animal-based studies. Um, and it seems to me that what’s happening is that forgetting is being modulated by the predictability of your environment that is using the same biological apparatus as prediction, error based learning is in memory extinction.  </p>



<p>Tomás    00:58:04    The difference is that when we’re studying memory extinction in the associative conditioning world, we as experimentalists, we control the conditions that are important. We control the rules of contingency and contiguity in the associative conditioning experiments. So we know when there are violations, we know when the animal roughly should be having prediction based learning and be updating its representation by whatever mechanism it is, whether it’s extinction, whether it’s counterconditioning or whatever. In contrast, when we study natural forgetting, we don’t know what the animal is really focusing on because often we’re using very different behavioral paradigms. We might be using maze learning, or we might be using object based memory or visual recognition or more complex paradigms if we’re studying in humans. Uh, but we don’t really know how the subject is then seeing similar things, shapes and experiences as it’s going about its life, either through further experiences in the laboratory, or if it’s a free living organism or human being, the similar things that it sees out in the world. And in a sense, what might be happening in natural forgetting is a range of different prediction violations that is happening all the time is pruning the structure of generalized memory engrams, uh, in our brain. Um, and that, in a sense, you could posit that under at least some, some circumstances that forgetting maybe some kind of a form of learning.  </p>



<p>Paul    00:59:38    So can I tell my wife, so I I’m a gifted forgetter and what maybe what you’re telling me is that now I can tell my wife that actually, uh, me forgetting what she finds important, important, you know, some event or something, uh, is actually, I’m, I’m super bright, uh, by forgetting well I’m generalizing well, and that she should, uh, commend me for forgetting rather than, uh, become upset with me.  </p>



<p>Tomás    01:00:05    Uh, exactly. I mean, my wife tends to get annoyed that I remember so many things. So I, it goes it’s, it goes bad either. It goes bad either way. This is the point of people who have, uh, hyper identic memories. I don’t have, I don’t have perfect memory, but, but I think the more fixed you are with your memories, the more problematic it also gets your you’re not going to be adaptable enough to change. Um, and in terms of social interactions, it’s going to, it’s going to also create annoyances either way, the gradient, cause we’re here again, we’re back to an analog gradient of, of probabilities is there for a reason, our environment is always changing and because our environment is always changing, uh, our memories need to change with them and forgetting in many respects is, is a loaded term. Uh, I think that it’s far better to consider, uh, it, to be about belief formation, that memories, and to a certain extent, instincts are not so much pieces of information that exist on their own.  </p>



<p>Tomás    01:01:15    What they are is a set of latent structures in our brain, which allow us to have beliefs that can make predictions of the world with respect to different environmental affordances. And I suppose that’s, that’s an enactivist perspective in that we’re concerned with being a part of the world around us and behaving appropriately. And when we consider forgetting, we’re always thinking about there’s a right answer, but in nature there isn’t always one right answer. There’s many things you can do. It changes. And in any situation, there are many behaviors you can use and forgetting just simply means not using the one that we would have expected you to use. I don’t think that ethologically speaking, that is the most, um, useful way of thinking about it. Um, but we can look at different types of memory to, to test this idea in very superficial terms. So we can think about memories that we never forget, and then memories that we, we always forget.  </p>



<p>Tomás    01:02:13    Um, so I’ve often been struck by, uh, how elderly people who might be in their seventies or approaching 80 can do things like get on a bicycle and cycle it perfectly well, even though they haven’t cycled a bicycle in 30 or 40 years. So they have a complex motor memory that they learned at some point in their lives. They haven’t used it in, in possibly decades. And then they can just go in cycle a bike. You don’t seem to have natural forgetting for complex motor memories. And it occurred to me that, and there’s not a lot of data on this, but that it’s could be because the physical environment, uh, for our bodies as to how they need to move in the world, doesn’t really change because the laws of motion are based on gravity as it is on earth. And so the predictability of the environment isn’t changing very much.  </p>



<p>Tomás    01:03:07    There are perceptual modalities, which seem to be more fixed. And those memories, it seems to me are also more stable on the other hand, social memories fluctuate all the time, uh, because social memories change all the time and other types of human specific semantic information also changes so much that we are constantly updating them, partly through a process of forgetting. And then at the extreme end, you have infantile amnesia, which is the type of amnesia, which affects 100% of us, which is why, why don’t we remember anything that happened in the first two to three years of life. And this seems to be a genetically encoded form of forgetting, which is specific to altricial mammals that basically wipes the slate clean for episodic and contextual memories for early life memories. And we still don’t really understand why that exists, but like the other kinds of amnesia that we discussed today, this seems to be reversible.  </p>



<p>Tomás    01:04:08    And it seems that infant memories are being retained in the brain of animals, right into adulthood, despite all of the things that are happening in late infancy and adolescence, and in early adulthood, all of the neurogenesis, all of the myelination effects, all of the synaptic pruning is not causing catastrophic, forgetting, um, in, uh, information, very sense of, of that information. So I think that to, to answer your question, the way we need to think about memory is new. Convolutions in the functional, if not structural typology of the brain, that it in effect, as we have new experiences, we’re making the structure, uh, more nuanced, uh, in a way that alters this structure of the connectome in very subtle ways, which allows for stable information. Um, and it’s accessibility to change with experience. Um, and this in itself would create a framework where new instincts renew, innate structures could also evolve on top of the same.  </p>



<p>Tomás    01:05:17    So rather than thinking about always starting from the same structure and filling it in with pieces of, of Shannon information, we need to think about the structure as a part of the informational relationship. And there’s some kind of semiotic interaction between the information that is embedded in the brain and the environmental affordance of the, in the world, which is relevant to that particular brain structure. And of course there are going to be embedded simulations and recursions that are projected in the brain. There are going to be many micro environments within the brain where the brain is feeding back onto itself, of course. Sure. But the point is that the evolving structure either evolving through a process of learning or evolving through a process of actually biological evolution is going to be one that is always, uh, developing in a progressive, but completely undirected undirected way. Um, and I think forgetting is, uh, in a sense of folk psychological term that we’ve used to as a catch-all for whenever memory is not retained in, in a way that we would not usually expect it to be retained,  </p>



<p>Paul    01:06:35    Do folk psychological terms like forgetting and like memory, do they impede progress? Uh, because we end up, you know, we often redefined terms and instinct as well. Right? You said that there are like 13 to 20 definitions of instinct. Um, do they get in the way, I mean, do they get in the way of your own thinking or can you move beyond the folk psychological concepts?  </p>



<p>Tomás    01:06:57    I don’t think they get in the way of our thinking, uh, very much. Um, I think that it’s really important to understand, uh, where these ideas came from. So when we define something as an engram or as an instinct or as a representation, or when we use these filler words in neuroscience like coding, uh, I think we really need to examine where the basis of that, of that idea came from. Does it even have a basis? Is it just something we use to be pragmatic and that’s okay. We have to be pragmatic when, when doing experiments, um, in general, I think that we need to be very open-minded about how we’re approaching the neuroscience of memory or possibly the neuroscience of anything. I think that there’s no need to be very rigid in how we’re approaching brain information, because we just know so little, I mean the more and more we go forward in neuroscience, the more it becomes clear.  </p>



<p>Tomás    01:07:56    We don’t really know how, um, information is being coded. For me, it is still about the intersection of memory, um, and instinct. And I’m becoming quite interested in the question of, of the origin of instincts, uh, because the standard Darwinian view is that they evolve randomly by natural selection. And then, uh, recently, well, I suppose this happens every few decades, Lamarckian ideas come back to the fore where people are interested in transgenerational memory, which isn’t tenable in many respects, except in very interesting exceptions. And it seems to me that understanding memory engram biology is going to give us insights into the future that contribute to understanding the origin of instinct.  </p>



<p>Paul    01:08:50    You have a whole story about how, um, you can develop instincts through memory. Maybe you can, uh, elaborate that story.  </p>



<p>Tomás    01:08:58    Well, the idea of transgenerational instincts is that if you have a person who learns certain things in their lifetime, then the information somehow goes across from their brain into their sperm or egg producing cells and into their offspring, which then develop. And that information manifests in the brain of the offspring. And that would be a kind of Lamarckian evolution. There may be some cases where that kind of thing happens. And I’m thinking about Brian Dias and Kerry Ressler’s very interesting experiments on this and of course other transgenerational epigenetic experiments where a response to stress or for want of a better term stress or emotional set points do show some kind of epigenetic effect. But that’s not what I’m talking about. I’m talking about the transfer of specific pieces of information, like, like an engram. I don’t think it’s tenable for, for that to work, even though some very interesting ideas have been put forward by people like Gene Robinson, uh, because evolution is primarily conservative.  </p>



<p>Tomás    01:10:05    The most important thing about, uh, biological evolution is once you find the right structure, you don’t change it randomly with every generation. You have to have the occasional accident happening, and that will create a new thing which might be selected for, but generally speaking, you want to keep everything hyper, hyper conservative, but there’s an in-between type situation between Lamarckian evolution and very conservative Darwinian evolution, which is based on the Baldwin effect, uh, which was put forward by James Mark Baldwin at the turn of the century. And he wasn’t just talking about learning and memory. He was talking about all kinds of biology. And the basic point is that biological plasticity in the lifetime of the organism allows you to test drive different states, different, different spaces that are possible for you to exert, um, in your lifetime, that could be a different biological state, or it could be the niche that you choose to or choose to occupy ecologically.  </p>



<p>Tomás    01:11:05    When you find something that works. Um, it then creates a kind of a niche where by a random mutation, that results in a developmental change that mimics that type of experiential plasticity will be immediately valuable to the organism. Um, so if you don’t have the Baldwin effect, you, of course you can still have evolution. And that’s, that’s just the fact, uh, because random mutations happen all the time. Sometimes they’re useful when they’re useful, they become fixed. But, um, Piaget observed in the seventies, in his, one of his final books, Behavior and Evolution, that this creates a particular problem for behavior because he just didn’t see it as plausible that a new behavior that evolved by purely random mutation would immediately become useful to the organism and its environment. First of all, how could it happen if it hadn’t been tested in the lifetime of the animal, uh, there’s issues with social acceptance, would you, would you be accepted by your conspecifics, which creates problems for reproduction?  </p>



<p>Tomás    01:12:10    Uh, but also, you know, imagine the scenario where, um, we have an ancestral species that isn’t afraid of predators. So prey animals are naturally afraid of predators. Like the mice in my lab are afraid of the smell of Fox urine, even though they’d never seen a Fox in their life or a Hawk coming from above. Exactly. Exactly. So they have instincts that are relevant for the predators that they evolved with, even though they’ve never experienced them. Now, at some point there was an ancestor that didn’t have that instinct, right. Because that’s just obvious, that’s the truth, um, yet those ancestors survived, otherwise their children would not be here. Right. So how do they survive in the absence of having those instincts? Well, obviously they learned, you know, they learned the hard way to adapt to those situations and they would have formed memory engrams.  </p>



<p>Tomás    01:13:01    The ones that survived the encounters with said predators would have evolved, um, would have formed memory engrams, and eventually certain latent states in the brain would become fixed. Uh, you, you learn them by experience or by some kind of, um, social learning. And if you don’t learn them, you’re not going to survive. Uh, you’re not going to reproduce. This creates a situation whereby it seems to me that different brain states, different engrams, will be formed and discovered by a process of learning. That would be more or less similar in all the individuals in the population. And that this allows the population to survive in a particular environmental niche, despite the dangers that are there and what happens by development, excuse me, what happens by learning can definitely happen by, by development because development is much more powerful than learning. We can only do a few things in our brain with learning.  </p>



<p>Tomás    01:13:58    We can only change so many things in our lifetime, but development is the reason why you’re human and not a tiger or a fruit fly. You know, development can decide your size, it can decide whether you have seven or four layers of cortex. If you’re very unfortunate, development is what decides whether you have significant diseases or whether you have above, uh, average, uh, capacity in some, in some physical or mental way. So development is, has a huge amount of power to it, uh, whether that’s influenced by genetic variation or, or stochastic developmental changes that occur along the way. So the point is that anything that you do by learning you can definitely do by development. So once you find an engram that works in the population, um, it’s not going to be very difficult for an ingram, uh, an instinctual brain circuit to come along by some kind of random mutation or assortment of genotypes that allows a particular brain circuit to be catalyzed in a developmental way that mimics a pre-existing engram.  </p>



<p>Tomás    01:15:12    And what you would have then is a, a type of learning, influencing the origin of an innate structure in the brain that is not transgenerational. There’s no magic molecules involved. Um, it’s simply a kind of parallel or convergent evolution that is happening, not in one generation, but in less generations than you would require by random Darwinian evolution. And this type of evolution was, was modeled by, by Hinton, by Jeff Hinton in, uh, in the seventies. Um, and it’s been illustrated to a certain extent in various biological conditions. And the Baldwin effect is not considered to be radical. It’s not considered to be, you know, it’s not considered to be heretical to anything that we know about Darwinian evolution, but it is considered to be a rather exotic case of, of Darwinian evolution. But it seems to me that if the biology of structuring an ingram is the same as the biology of structuring an engram, a memory engram —  </p>



<p>Paul    01:16:23    An I-N-gram. Yeah. And yeah, the different — you say ingram. I mean, it’s confusing because the one is E-N-G and one is I-N-G, right?  </p>



<p>Tomás    01:16:31    An engram with an E being a memory engram, an ingram with an I being an instinct, uh, if they’re made of the same biological stuff, uh, even if, if Randy Gallistel is right and they’re both molecular, or if they’re both anatomical or whatever, as long as they’re made of the same stuff, then you create the opportunity for the evolution and learning processes to be continuous. Um, and in a sense, what you have is this rolling evolution of latent structures, that form belief states, um, convergently happening by learning and evolution, learning and evolution influencing one another, um, in order to better adapt to the environmental affordances that you, that you have to, that you have to deal with  </p>



<p>Paul    01:17:18    In the case of, let’s say a mouse learning, you know, that a Hawk is bad, right. And then it, that, that, and then they form an in gram. So they’ve learned an E in Graham and Graham. Um, let me, let me ask you, uh, your thoughts, right. So I’m trying to connect that with the, uh, instinct story that future generations will have that instinct right through the developmental process. Is it because the mice that learned the valuable in gram, uh, had a predisposition already to, you know, through their structural, through their previous development, right? So maybe they were kind of instinctually tuned to learn the good in gram and that they’re, then their offspring are also tuned to learn the good in gram, but, uh, through development. Um, some of them, it actually becomes an instinct because their development happens in a slightly different way than their mom and dads. Does that make sense?  </p>



<p>Tomás    01:18:17    Uh, I think it makes complete sense. I think that’s the missing ingredient to the mechanistic basis of, of how it’s happening, um, that there’s some kind of element of plasticity, not just constructive plasticity, but there’s something that is waiting to be pushed off a cliff in how their brain is being formed. And that sort of almost stereotyped kind of plasticity. It can be initiated by learning or by initiated by changes in development. And it speaks very much to what, uh, Peter Robin Hiesinger was talking about on this podcast a few episodes ago, um, about how the circuits can self-assemble in slightly different ways. And you can’t have an infinite amount of conditions pre-encoded in the brain. Okay. That’s, that’s not possible. What you can have is an exponentially increasing range of possibilities, which, uh, can be initiated, um, by developmentally stochastic events, which are not low probability.  </p>



<p>Tomás    01:19:29    In other words, there can be changes that are about to occur that haven’t occurred, which are more likely to occur than any random change because of the way, um, because of the way the system is structured. So at the simplest sense, there are two populations of neurons that are easily able to come in contact with each other that are not in contact with each other, but could be easily used to form a new topological layer that would essentially allow for information encoding. Um, in, in other words, in other words, a state of potentiality, a state of potential plasticity that hasn’t formed, but can form. And as long as you have lots of growing, many different possible states of potential plasticity that haven’t yet happened, the ball can be easily pushed down the hill either by learning or by evolution. And that I think is really important because it means that you’re not just really waiting forever for a random mutation to completely make a new circuit, which isn’t plausible. What you’re waiting for is the developmental plasticity. Um, that is, you know, genetically set off to, to just find that next step, which is one of the next potential steps, which  </p>



<p>Paul    01:20:49    Is closer than, than completely random. Exactly,  </p>



<p>Tomás    01:20:52    Exactly. And I think that that’s the, that’s the point of the thesis of, of a self-assembled brain is that it’s one step at a time, but that at each step in each section, you’re creating a number of different possibilities, which allow you to be adaptable, but allow that adaptability to have meaning across individuals in a population that has some kind of conservation and some kind of meaning that, that, that is common between us.  </p>



<p>Paul    01:21:23    It’s actually reminds me of Stuart Kauffman Kauffman’s concept of, um, the adjacent possible, right? So that, uh, you know, every new step we take opens up unknowable, but next new steps that, um, in, in this story, right, those next new steps, we’ll be learning, learning during the lifetime that then, uh, narrow down the future possibilities, but we can’t really predict how that’s going to happen. Sorry. So, uh, as an aside, so Tomas we’ve covered so much ground here. Um, I, I’m going to ask you maybe what seemed like random questions that are, but are connected to a lot of what we’ve talked about. One of the things I was wondering when you’re talking about instinct is whether if this makes sense to you, this question, uh, whether instincts can change during the course of a lifetime, uh, you know, I’m thinking about like the elderly elderly, right? Can they have different, can they have through their development along their lifetime? Is it wrong to say form new instincts or would the instincts always already be there because you think of instinct and memory as a continuum? Um, so would it be wrong to say that, I mean, is there, is there a possibility that instincts could change, um, coded through development through the lifetime?  </p>



<p>Tomás    01:22:39    Um, I’m fascinated by that question. Um, and I, I really liked to take both an evolutionary and a developmental approach when trying to understand memory and instincts. I mean, we know they show differences across the lifespan. I mean, those of us who are parents probably notice the most acutely, uh, because we have really strong parental instincts, but they don’t express generally speaking until you’re at the point of life where you’re, where you’re reproducing. Um, similarly there are other instincts that don’t express until adolescents and so on. So we know that instincts show up at, at age appropriate times. Uh, but we also know that instincts change in our lifetime based on relevant experience. So we, we, a large part of education can be argued to be about the nuance, creating nuance on, on, on human instincts, uh, how we respond to each other socially. Um, uh, a very primate level is not adaptive for modern human society.  </p>



<p>Tomás    01:23:44    And we spend a great deal of time, uh, in trying to get people to understand themselves and to understand how to behave in a socially acceptable way. And we do this first by basic Pavlovian and operating mechanisms. And as we get older, we, you know, we teach people to have more insight about themselves. Um, this is very much, I think, dealing with our innate behaviors, but, but you can do more simple experiments in, in rodents to where instincts can be modulated by experience in the same way that that memories can. And the more I think about this, the more it seems to me that the brain doesn’t even know what an instinct or a memory is. In other words, that once you have a structure in the brain that has some information about the world, um, it’s just a part of the brain and it’s going to be maintained as a part of the brain, and it can be updated in the lifetime of the organism by a process of learning.  </p>



<p>Tomás    01:24:40    It may have emerged by a process process of evolution by a mechanism of developmental plasticity. It may have emerged by a process of learning by the mechanism of synaptic plasticity in the broad sense, altering the connectome. Uh, but the origin doesn’t matter. What matters is the product where the product is information in the brain that is relevant to the world. Um, why would we need to know what was a memory and what was an instinct? What we need to know is what we think is true, and what we need to be able to do is to change it as our environment changes in order to maximize our chances of survival and reproduction in an adaptive way for ourselves and for our conspecifics.  </p>



<p>Paul    01:25:22    So think, you know, reading your papers, and they’re always pictures, right? Of like these networks of neurons, some of which are a certain color. And those are the in gram neurons. Do you think of memories as having definitive boundaries, right. Like, so let’s say you have a, a network of 2000 neurons that are part of the in gram, uh, is that the boundary of the memory or, you know, does the boundary of, well, how do you think about the boundaries of memories? Like what does an individual memory, for example,  </p>



<p>Tomás    01:25:53    I’m not sure we’ll ever be able to identify an individual memory from the way we can an individual gene. When we look at a population of engram cells due to technical limitations, some of those are background cells, uh, from a home cage environment. Yeah. But we’re not getting all the engram cells because we’re missing certain immediate, early genes that may have also been active. We may miss cells that were important for learning, but that didn’t express that immediate early gene, we’re not activating every brain region usually. So we’re only looking at a section of the engram. Uh, but I think probably most troublingly, uh, there seem to be cells that are not active at the point of learning, but become part of an engram, uh, after learning. And this shouldn’t be very surprising when you think about it. So there may be cells in the vicinity of the activated cells, and we’re only labeling the activity itself with current technology that were completely nothing to do with the learning experience.  </p>



<p>Tomás    01:26:48    But that plasticity that happened during learning then brought those cells into the engram and this changes everything. And this has been observed, um, in certain studies that people haven’t made a big deal out of, but, but, but, but it’s known to be the case. So, uh, when does an engram stop, uh, and start? We have no way of knowing with current methods, but even if methods were better, I’m not sure we’d be able to answer that question, because I think that an engram is a subset of the information in the brain, which we’re tying to a particular experience as investigators, but that in reality is not going to have a, um, one to one relationship with anything in particular. So a gene has a relationship to a protein, it’s one-to-one: you go from one concrete thing to another concrete thing. I don’t think we’re going to have a one engram, one meme relationship or one engram, one bit of information relationship, because they’re all going to be tied up with each other.  </p>



<p>Tomás    01:27:49    Of course there is separation. So this is one of the original premises that I brought up in this discussion. Of course there is separation of very similar things, a separation of your face versus other faces that I’m aware of, but there’s also generality between those things. And if there wasn’t generality, I wouldn’t have the concept of faces. And part of those is part of that generality is based on the idea that we have an innate structure in the fusiform gyrus for being able to see human faces, which is why we can have so many different memories of them. Uh, and we separate those, but they’re not going to be completely in isolation. Um, so I’m not sure we’ll ever be able to identify the crucial boundary of a particular memory. Uh, but maybe I’m wrong. Maybe it would be more, maybe, maybe it will be more straightforward in 10 or 15 years, or maybe Randy Gallistel is right, which is: find the gene that encodes for my memory of your face in a particular cell in my fusiform gyrus. Right.  </p>



<p>Paul    01:28:52    Okay. Nicely done there. Um, the reason why, you know, I’m kind of, you know, have thought about that, thinking about different engrams, different what separates them is because when I think of memory colloquially, right. I think of my own, my own subjective experience of my memories. Do you, do you think that it’s, um, I’m, I’m imagine what you’re going to say is that there’s not an isomorphism between the in gram cells and your subjective experience of memory, but do you think it’s important to, and is it on your to-do list to try to connect the mechanisms of Ingram cell, uh, memories to our, you know, since our subjective sense of our memories,  </p>



<p>Tomás    01:29:34    I get asked that question quite a bit when it comes to developmental amnesia, because people ask me whether infants are really having the same subjective experiences as adults, um, which is a broader question than manipulating engram cells themselves. And all I can say is we rely on the behavioral facts that we measure, and we have the same standard for behavioral measurement and infants as we do in adulthood. It’s not the same when you do human studies because you don’t have the same verbal subjective reporting. I think this is a challenge for translational studies, and certainly we want to be able to translate engram biology into the study of human memory. But I think from a broader philosophical perspective, I don’t like to think about the subjective experience of recall as being relevant for how we understand memory recall at a scientific level, except when shining insight into basic facts of memory, which may be relevant for, uh, finding holes or points of confusion in different theories that you’re, you’re forced to accept as a student.  </p>



<p>Tomás    01:30:45    I mean, one of the reasons that I got interested in amnesia was as I got older, I became more aware of different things that had meaning when I was younger, that I wasn’t really paying attention to. Um, and you start to reevaluate different scenarios that you found yourself in as, as a younger person, as a teenager and so on. And you start to remember things that are quite innocuous, uh, like how did I even remember that particular episode? It was so meaningless. I’d never reflected on that before, except now I kind of, now I know what that person meant now. I know what they were saying. Um, that kind of subjective experience did trigger me to question, uh, how we understand forgetting because if very boring, salient memories can survive for so long, then it’s, it seemed to me that very important memories should also be surviving, even cases of amnesia. So I think there are clues from the everyday intuition that we get from just the realities of our own memory. But I think that’s a different thing from being too concerned with, with subjective experiences of the boundedness or the Qualia of, of memory recall.  </p>



<p>Paul    01:31:59    I want to switch gears and just in the last moment or two, uh, ask you a kind of career type question or questions, um, you know, thinking about moving forward and, you know, just, just, uh, going off of what you were just talking about, um, and using different tools. I know that you are a, an advocate of interdisciplinary research, right. And collaboration, and it seems to be one of the cornerstones, uh, of your approach in your career. And, you know, when I was in grad school, this was always touted, right. Um, all these programs are interdisciplinary and that’s now every, every, every neuroscience program says that. Right. But it’s really, uh, and it sounds great. Um, and it, and sometimes it’s true. Sometimes it’s not so true, but it’s really hard to quantify. I think the actual benefit of, uh, you know, of, of doing this cross disciplinary research and incorporating different types of, uh, experimental approaches and different types of people. Um, so in a large sense to me, it’s really vague the benefits of it. Um, and I wish I could quantify it. Right. Uh, do you have a sense of how it’s really helped your own research program, et cetera? You know,  </p>



<p>Tomás    01:33:09    I’ve always just been driven by the question. Uh, I want to know something I’m interested in interesting questions, and I don’t care what the tools or the theories that are needed to, to answer them. I teach my students that my undergraduate students, that neuroscience is not a discipline. So biochemistry is a discipline. Genetics is a discipline. Physiology is a discipline and psychology are disciplines, but neuroscience by definition is not a discipline. I mean, pretty much anything that has science in its name is probably not a discipline. It’s some kind of, kind of amalgamation of disciplines. But I also think that we’ve gone through a period in history where science was just super conservative. And I was trained as a molecular biologist originally in my PhD and the philosophy. And it’s still the philosophy in many places is that everything has to be hyper conservative. You have to stay in your lane.  </p>



<p>Tomás    01:34:07    You have to get really, really good at the particular technique you’re trained on, uh, in a sort of craftsmanship kind of way. You have to fall into many holes. You have to climb out of those holes because that’s how we were trained in the sixties and seventies. And that’s the way everyone should be trained, uh, going forward. You’re forced to abandon that when you get interdisciplinary, when you’re getting interdisciplinary, you’re forced to kind of have many things going on at the same time. And you’re forced to learn on the fly part of that is collaborations. Uh, but I think a larger part of that is that our students are a lot more impressive than we give them credit for when we give them the opportunity to be. Um, one of the reasons that, uh, my lab is able to work on a number of broad questions is that first of all, it has the common thread of memory engram cells, which, which unites everything.  </p>



<p>Tomás    01:35:01    But also because I continue to be incredibly impressed by the performance of my graduate students. And also to a large extent to my, to my undergraduate students, once you start to move away from very conservative, this is the way we’ve always done things. So this is the way you should do things. Um, when students are pushed and encourage, uh, I don’t mean in a way that’s full of sort of anxiety or unrealistic expectations, but encouraged to find their own ways of putting different scientific topics together, um, of mixing, uh, one field with another one part of which might be very conservative stuff. The other might be more sort of more frontier, more recent technology. And in one area that, that mixing one thing with another one thing being basic, and one thing being a very, very frontier often kind of produces the best results provided that they’re focused on the question and above all things that they are rigorous.  </p>



<p>Tomás    01:36:06    And when you, when you teach students to be rigorous about their own data, they will always have a greater degree of a seriousness with it than you will have, uh, as a PI. And I think what’s allowed us to do this. Partly is how everything has changed in the last 15 years. We’ve had the internet for longer than that, but in the last 10 to 15 years is where everything has become. So, uh, unified that students are going to Neuromatch Academy, that they can watch lectures with anybody, uh, online that, that people are zooming into lab meetings to tell them what they think about the journal club paper that we’re covering, that students are forming social networks, that all of the information that you want is immediately online, that we have practical classes online, that students build their own things. Now we’re, we’re doing things in a way that is much more liberated than it was, uh, 15, 20 years ago, and certainly more than 30, 40 years ago. Um, and I think as long as it’s, as long as it stays rigorous and as long as we take feedback from each other and as long as it’s okay to make mistakes as we go along, um, then I think that we, we, we, we start to see interdisciplinary interdisciplinary work, not as, as a challenge or a problem, but, but as an everyday way of, of doing science. And I hope that other areas of biology start doing that also. Hmm.  </p>



<p>Paul    01:37:33    Very good final question. And this is really just for me. I was reflecting. So I went snowboarding yesterday with, uh, my snowboarding buddy, and, you know, he’s not a scientist, but sometimes I tell him about what I do and he’s bright guy, so we can keep up. Um, and I was reflecting on how many friendships I’ve kept in and out of my science life. Um, the question is what percentage of your friends, um, understand what you do have a, you know, at least a cursory or, or slightly above cursory understanding of what you do are all your friends, scientists, or do you have friends outside of science that you keep up with as well?  </p>



<p>Tomás    01:38:12    That’s an interesting question. I don’t know. I’ve never quantified that, um, I’m engaged in a fair amount of public communication of science. And for that reason, even my non-academic friends who aren’t academics at all are reasonably aware of what it is, what it is that I do. Uh, but I do think that us as scientists, that we need to be spending much more time in engaging with the public. Um, and I do worry that as we become so hyper competitive. And so hyper-focused on research that it sort of removes us from the everyday of life. And that’s been observed by many academics in the past that the mechanism that, which through which scientists are kept out of public conversations are, they’re not very direct. It’s not by any kind of, um, any kind of direct carrot or stick mechanisms. It’s mostly because everybody is just so harried by the everyday stresses of, of doing their job.  </p>



<p>Tomás    01:39:17    And that includes research scientists. But I think what we’ve seen in the past five to 10 years with certain political movements and with the ominous, uh, rapidly accelerating problem of climate change, uh, we don’t really have a choice except to, except to properly engage in a serious way with not just our friends and family, but, but with colleagues and other citizens who aren’t scientists. And I would say that the most important thing that I have learned when interacting with people, I know who aren’t scientists about scientists, is that we have to completely reorientate how we’re approaching. And we cannot just go and tell them the way that they need to interpret the science. I mean, it’s, there’s no difference between me telling a lay person how they need to interpret neuroscience and me going to an in vivo physiology conference and telling them how they should interpret the memory engram.  </p>



<p>Tomás    01:40:17    If that doesn’t work the public fund science, and they only should hear about science as a collaboration. Uh, we are doing science with them when we’re asking the public about science or telling them about science, we need to be asking them what they think is interesting. What they think is important. And we need to be in a non-confrontational non-confrontational way, meeting them where they are offering them new insights that our work has produced, and then discussing different ways forward, where the process is considered a partnership and not a one way communication of information. Uh, communication is, is bilateral. And I think we as scientists, we need to listen to the concerns of regular people, um, and that involves being as generous as possible to views that may not be very well articulated all of the time.  </p>



<p>Paul    01:41:18    Thank you for being generous with your time. Tomas, this has been fun. I appreciate it.  </p>



<p>Tomás    01:41:22    Thank you for having me.  </p>

</div></div>


<p>0:00 – Intro<br />4:05 – Response to Randy Gallistel<br />10:45 – Computation in the brain<br />14:52 – Instinct and memory<br />19:37 – Dynamics of memory<br />21:55 – Wiring vs. connection strength plasticity<br />24:16 – Changing one’s mind<br />33:09 – Optogenetics and memory experiments<br />47:24 – Forgetting as learning<br />1:06:35 – Folk psychological terms<br />1:08:49 – Memory becoming instinct<br />1:21:49 – Instinct across the lifetime<br />1:25:52 – Boundaries of memories<br />1:28:52 – Subjective experience of memory<br />1:31:58 – Interdisciplinary research<br />1:37:32 – Communicating science</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/702f4b49-f771-4763-b184-11cd4695ec5a-127-Tomas-Ryan-Public.mp3" length="98850130"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Tomás and I discuss his research and ideas on how memories are encoded (the engram), the role of forgetting, and the overlapping mechanisms of memory and instinct. Tomás uses optogenetics and other techniques to label and control neurons involved in learning and memory, and has shown that forgotten memories can be restored by stimulating “engram cells” originally associated with the forgotten memory. This line of research has led Tomás to think forgetting might be a learning mechanism itself, an adaptation our brains make based on the predictability and affordances of the environment. His work on engrams has also led Tomás to think our instincts (ingrams) may share the same mechanism of our memories (engrams), and that memories may transition to instincts across generations. We begin by addressing Randy Gallistel’s engram ideas from the previous episode: BI 126 Randy Gallistel: Where Is the Engram?



Ryan Lab. Twitter: @TJRyan_77. Related papers: Engram cell connectivity: an evolving substrate for information storage. Forgetting as a form of adaptive engram cell plasticity. Memory and Instinct as a Continuum of Information Storage in The Cognitive Neurosciences. The Bandwagon by Claude Shannon.


Transcript

Tomás    00:00:03    We were in the privileged position to be able to label and manipulate specific ensembles themselves in the hippocampus of awake behaving mice and in doing so, we were able to show that these specific ensembles of neurons were somehow containing, or at least are a part of the information that is allowing us to catch. I gone to specific memories. You start to think more about the value of forgetting and that forgetting maybe not so much a bug of the brain, but a feature as long as they’re made at the same stuff, then you create the opportunity for the evolution and learning processes to be continuous. Um, and in a sense, what you have is this rolling evolution convergently happening by learning evolution, learning evolution influencing one another.  



Speaker 0    00:01:06    This is brain inspired.  



Paul    00:01:20    Welcome everyone. It’s Paul. So in the last episode, I spoke with Randy gala still about his idea that the in Graham, the physical substrate of our memories must be stored within neurons in some sort of stable molecular substrate that goes against the grain of most modern neuroscience, which suggests our memories are somehow stored among the connections and structure of ensembles or networks of n...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:42:39</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 126 Randy Gallistel: Where Is the Engram?]]>
                </title>
                <pubDate>Mon, 31 Jan 2022 16:57:11 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-126-randy-gallistel-where-is-the-engram</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-126-randy-gallistel-where-is-the-engram</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/01/art-126-01.jpg" alt="" class="wp-image-1632" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2022/01/randyHead-767x1024.jpeg" alt="" class="wp-image-1633" width="192" height="256" /></div>



<p>Randy and I discuss his long-standing interest in how the brain stores information to compute. That is, where is the engram, the physical trace of memory in the brain? Modern neuroscience is dominated by the view that memories are stored among synaptic connections in populations of neurons. Randy believes a more reasonable and reliable way to store abstract symbols, like numbers, is to write them into code within individual neurons. Thus, the spiking code, whatever it is, functions to write and read memories into and out of intracellular substrates, like polynucleotides (DNA, RNA, e.g.). He lays out his case in detail in his book with Adam King, <a href="https://www.amazon.com/gp/product/1405122889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1405122889&amp;linkId=1362bd57c9102eee598fc51ed3aa6126">Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience.</a> We also talk about some research and theoretical work since then that support his views.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1405122889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1405122889&amp;linkId=1362bd57c9102eee598fc51ed3aa6126"><img src="https://braininspired.co/wp-content/uploads/2022/02/book.jpeg" alt="" class="wp-image-1635" width="209" height="300" /></a></div>



<ul><li>Randy’s <a href="https://psych.rutgers.edu/faculty-profiles-a-contacts/96-charles-randy-gallistel">Rutger’s website</a>.</li><li>Book:<ul><li><a href="https://www.amazon.com/gp/product/1405122889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1405122889&amp;linkId=1362bd57c9102eee598fc51ed3aa6126">Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience.</a></li></ul></li><li>Related papers:<ul><li>The theoretical RNA paper Randy mentions: <a href="https://arxiv.org/abs/2008.08814v3">An RNA-based theory of natural universal computation</a>.</li><li>Evidence for intracellular engram in cerebellum: <a href="https://www.pnas.org/content/111/41/14930">Memory trace and timing mechanism localized to cerebellar Purkinje cells</a>.</li></ul></li><li><a href="https://youtu.be/JCDaURmKPm4?t=4211">The exchange between Randy and John Lisman</a>.</li><li>The blog post Randy mentions about Universal function approximation:<ul><li><a href="https://www.lifeiscomputation.com/the-truth-about-the-not-so-universal-approximation-theorem/">The Truth About the [Not So] Universal Approximation Theorem</a></li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Randy    00:00:03    Usually when I ask neuroscience is how you and code a number either in Asian apps or however many synopsis they think might be necessary. That’s a conversation stopper. All I get is hand ways, you know? Well, you see, there are lots of synopsis and it’s a pattern. It’s an absence. Well, could you say something about the pattern? I mean, how does the pattern for 11 different for the pattern from three, for example, can you shed a little light on that? You do not want to answer that question. The end is the low-h...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Randy and I discuss his long-standing interest in how the brain stores information to compute. That is, where is the engram, the physical trace of memory in the brain? Modern neuroscience is dominated by the view that memories are stored among synaptic connections in populations of neurons. Randy believes a more reasonable and reliable way to store abstract symbols, like numbers, is to write them into code within individual neurons. Thus, the spiking code, whatever it is, functions to write and read memories into and out of intracellular substrates, like polynucleotides (DNA, RNA, e.g.). He lays out his case in detail in his book with Adam King, Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience. We also talk about some research and theoretical work since then that support his views.







Randy’s Rutgers website. Book: Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience. Related papers: The theoretical RNA paper Randy mentions: An RNA-based theory of natural universal computation. Evidence for intracellular engram in cerebellum: Memory trace and timing mechanism localized to cerebellar Purkinje cells. The exchange between Randy and John Lisman. The blog post Randy mentions about Universal function approximation: The Truth About the [Not So] Universal Approximation Theorem


Transcript

Randy    00:00:03    Usually when I ask neuroscience is how you and code a number either in Asian apps or however many synopsis they think might be necessary. That’s a conversation stopper. All I get is hand ways, you know? Well, you see, there are lots of synopsis and it’s a pattern. It’s an absence. Well, could you say something about the pattern? I mean, how does the pattern for 11 different for the pattern from three, for example, can you shed a little light on that? You do not want to answer that question. The end is the low-h...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 126 Randy Gallistel: Where Is the Engram?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/01/art-126-01.jpg" alt="" class="wp-image-1632" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2022/01/randyHead-767x1024.jpeg" alt="" class="wp-image-1633" width="192" height="256" /></div>



<p>Randy and I discuss his long-standing interest in how the brain stores information to compute. That is, where is the engram, the physical trace of memory in the brain? Modern neuroscience is dominated by the view that memories are stored among synaptic connections in populations of neurons. Randy believes a more reasonable and reliable way to store abstract symbols, like numbers, is to write them into code within individual neurons. Thus, the spiking code, whatever it is, functions to write and read memories into and out of intracellular substrates, like polynucleotides (DNA, RNA, e.g.). He lays out his case in detail in his book with Adam King, <a href="https://www.amazon.com/gp/product/1405122889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1405122889&amp;linkId=1362bd57c9102eee598fc51ed3aa6126">Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience.</a> We also talk about some research and theoretical work since then that support his views.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1405122889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1405122889&amp;linkId=1362bd57c9102eee598fc51ed3aa6126"><img src="https://braininspired.co/wp-content/uploads/2022/02/book.jpeg" alt="" class="wp-image-1635" width="209" height="300" /></a></div>



<ul><li>Randy’s <a href="https://psych.rutgers.edu/faculty-profiles-a-contacts/96-charles-randy-gallistel">Rutger’s website</a>.</li><li>Book:<ul><li><a href="https://www.amazon.com/gp/product/1405122889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1405122889&amp;linkId=1362bd57c9102eee598fc51ed3aa6126">Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience.</a></li></ul></li><li>Related papers:<ul><li>The theoretical RNA paper Randy mentions: <a href="https://arxiv.org/abs/2008.08814v3">An RNA-based theory of natural universal computation</a>.</li><li>Evidence for intracellular engram in cerebellum: <a href="https://www.pnas.org/content/111/41/14930">Memory trace and timing mechanism localized to cerebellar Purkinje cells</a>.</li></ul></li><li><a href="https://youtu.be/JCDaURmKPm4?t=4211">The exchange between Randy and John Lisman</a>.</li><li>The blog post Randy mentions about Universal function approximation:<ul><li><a href="https://www.lifeiscomputation.com/the-truth-about-the-not-so-universal-approximation-theorem/">The Truth About the [Not So] Universal Approximation Theorem</a></li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Randy    00:00:03    Usually when I ask neuroscience is how you and code a number either in Asian apps or however many synopsis they think might be necessary. That’s a conversation stopper. All I get is hand ways, you know? Well, you see, there are lots of synopsis and it’s a pattern. It’s an absence. Well, could you say something about the pattern? I mean, how does the pattern for 11 different for the pattern from three, for example, can you shed a little light on that? You do not want to answer that question. The end is the low-hanging fruit, because it has a really simple job to store the information just like DNA’s job is to store the information.  </p>



<p>Paul    00:00:52    What is the role of synaptic plasticity?  </p>



<p>Randy    00:00:57    I honestly have no idea since I literally believe that an associative bond never formed in the brain of any animal. And since the plastics inadequacies, transparently conceived of as an associate bond, right? I certainly don’t think that’s what they all, could they play a role in the complications carried out in signals? Sure.  </p>



<p>Speaker 3    00:01:26    This is brain inspired.  </p>



<p>Paul    00:01:40    Hey everyone, it’s Paul. Engram. It’s a term coined by Richard semen in the early 19 hundreds. And it refers to the physical substrate that stores the information that makes up our memories. In other words, the, the trace of our memories, we still don’t have a definitive answer to the question of how our brains store memories, what makes up the end gram many neuroscientists would say a given memory resides in a specific pattern of neurons and the activity of those neurons and that the formation of new memories and changes in existing memories that is learning depends on changes in the connections between neurons, synaptic, plasticity. And of course, training deep learning artificial networks is fueled by adjusting the weights between their units to learn tasks, but not everyone agrees with this story. That memories are somehow stored in neural connectivity patterns and the activity of the neurons in those patterns as Tomas Ryan puts it and Tomas will be on my next episode.  </p>



<p>Paul    00:02:44    At what level does an in gram lie is an in gram in the cell or as a cell in the Ingram. Randy gala Stoll is my guest today. He’s a distinguished professor emeritus at Rutgers, and he’s been at this for over 60 years. And he’s been arguing much of those 60 years that the in gram must lie within the cell. Not that a cell is in the, in Graham and his argument, which we’ll hear him flush out is that brains are computational organs. And to compute you need symbols, namely numbers. And Randy thinks the only reliable way to store numbers over long periods of time, which is necessary. And to be able to read from those numbers and write new numbers is to use sub cellular molecules like DNA or RNA or something similar. He also detailed his arguments in a great book memory in the computational brain with Adam King, which was published over 10 years ago.  </p>



<p>Paul    00:03:41    I recommend that book. I have distinct, uh, episodic memories, uh, reading that book in my office in Nashville, for example. And I’ve gone back to it multiple times since then it goes over the fundamentals of information theory and uses examples from animal behavior like navigation and foraging to argue his case. So today we talk about some of those ideas, uh, some of the evidence to support those ideas and a host of other bells and whistles, including his long successful career, studying the many abstract processes, underlying our learning memory and behavior. You can find show notes at brain inspired.co/podcast/ 126 on the website. You can also choose to support the podcast through Patrion and join our brand inspired discord community. If you want and get access to all the full episodes I publish through Patrion, or just to throw a couple dollars my way each month to express your appreciation. I appreciate you listening. I hope this podcast is enriching your minds and bringing you some joy. Here’s Randy, Randy, you’re 80. You just told me you’re 80 years old. Yes. Well, uh, when, when, when did you turn 80?  </p>



<p>Randy    00:04:53    Uh, back in may.  </p>



<p>Paul    00:04:55    Okay. Well, happy belated 80th. So I know that you have been interested in memory since the 1960s, at what point. So, you know, we’ll get to the, uh, the big idea, uh, here in a moment, but at what point in your career did you start questioning the, uh, typical neuroscience story about memory  </p>



<p>Randy    00:05:20    Way back in the sixties? Uh, when I was an undergraduate in Tony Dutch’s lab and I’m deciding that I wasn’t going to be a social psychologist, I was going to be a physiological psychologist as we called them in those days. And now we call them behavioral neuroscientists. And, uh, I really became an, a pasta, um, during while running my first experiment, which was, uh, a runway experiment with rats and mine would watch them and just watching their ads. I became absolutely persuaded that they, um, they knew what they were doing. They, uh, it wasn’t habits. Uh, I had already become enamored of hole’s vision of a mathematically rigorous theory of mind and brain computation, what we would now call computational neuroscience. Uh, but, um, I had already become an, a positive from the rest of his doctrine because I, you know, with all it was all habits. And of course there are many competitional neuroscientists for which that’s still true, but that’s what I mean when I said a moment ago before we were recording that, uh, uh, nothing has changed in the 60 years. I go to meetings now and I listened to some of the talks. I think this is the same shit I was listening to in 19.  </p>



<p>Paul    00:06:49    Well, well, so, you know, one of the things that, um, you talk about in your book, um, memory and the computational brain, why cognitive science will transform neuroscience is that there is this large gap between cognitive science and neuroscience. Uh, and, and I heard you talk recently and you’ve written, uh, about this as well, that, you know, actually even back, that was 2009, 2010, when that book came out and, uh, computational neuroscience was still a small swath of neuroscience writ large. Right. But that’s changed hasn’t it has, has computational neuroscience, which to me seems like is the majority of neuroscience, what’s your view on that? Has computational neuroscience come along  </p>



<p>Randy    00:07:33    Well in terms of the number and quality of people doing it? Yes. I, I certainly don’t see it as dominating neuroscience. I mean, neuroscience, you go to the annual, you know, the meeting, uh, the society for neuroscience, there’s 30,000 people there, right? I mean, there are two poster sessions and they, in this, uh, the poster sessions are so big that even if you try to, you couldn’t go by all the posters right on there, two of them every day and so on. And, you know, competitional neuroscience is kind of small. And, uh, then that big picture. And also when I think about it, the computational neuroscience, I guess, so at least, certainly my world view was dominated by vision people back in the day. Right. I mean, there still is. They’ve been very computational now for decades. In fact, there’s a fascinating, uh, book by, um, by Google and, and weasel, uh, which they reproduce their papers. Uh, it was clearly a, a project of David Hubel and, uh, he produces 25 of their plastic papers and there are introductions and epilogues to each per my Google, and he repeatedly rants against the mathematician. Uh, you know, the math of the fact that all the engineers and mathematicians now come in division, right? Because like so many of the early people, he really didn’t know much math.  </p>



<p>Randy    00:09:11    And these days you cannot do cutting edge vision without a fairly serious mathematics education. Right. Um, but that was already through 30 years ago. Um, so I think what you’re reacting to is now, of course, there are many people, uh, doing computational neuroscience and focusing on learning and memory, which did not use to be true. I mean, those fields used to be completely non-mathematical, I’ve had more than one colleague and friend tell me they went into this business precisely because they didn’t have blur. Right.  </p>



<p>Paul    00:09:50    That’s right. Yeah. Well, I mean, it seems like these days, and, uh, and I, again, this is my own bias because I, uh, I learned computational neuroscience through my career, my short kinda, uh, academic career, but going in, I didn’t really, I had some mathematics background, but I didn’t have modeling background. I didn’t have, you know, a real, a good footing in the computational world. So I kind of learned that through my training. Um, but didn’t, you, you kind of applied yourself and learned some necessary mathematics a little bit later in your career? No. Oh yeah, for sure.  </p>



<p>Randy    00:10:22    Um, I’ve been learning various bits of mathematics throughout the last 60 years. Um, I, for example, I, uh, I mean, I had the calculus as an undergraduate, but I didn’t, um, linear algebra and I taught the undergraduate linear algebra course at Irvine during, after I was already in a tenured associate professor during my first sabbatical, when I was, uh, we’re working with Duncan Lewis and studying also linear systems theory, which I also basically taught myself. I went partly of course, Dunkin was two orders of magnitude, better mathematician than I ever imagined I would ever meet, but he was incredibly good at explaining things. And I was teaching myself by reading those textbooks on linear systems there. And there was stuff. For example, I remember I could not wrap my mind around the convolution integral. So I said, Duncan, can you explain the, what convolution is? And he sat me down and I remember it was a half hour later. I absolutely understood what conflict.  </p>



<p>Paul    00:11:36    And that was on, uh, the, did he use a Blackboard or did he use PowerPoint? I’m just kidding.  </p>



<p>Randy    00:11:40    It was basically just verbal, although he may, I would, this was a long time ago talking about it. It would have been the Blackboard, there may have been some recourse to the Blackboard, but mostly well, anyway, somehow he found there were examples that made it clear and then I was able to use it. And that was satisfying.  </p>



<p>Paul    00:11:58    If you had to go back, would you enter by studying mathematics first? Because I asked because you have a deep knowledge of the behavior surrounding learning and memory, which you also had to have to get to where you are.  </p>



<p>Randy    00:12:16    Yeah, sure. Well, that was, I mean, first of all, that was what I took courses in. And second of all, um, I mean, that’s what I taught for 50 years, right? So, you know, the behavior I’m in the more mathematical treatment I rarely taught at the undergraduate level. Right. Because it wouldn’t take a very special undergraduate seminar to do it. I did teach it at the graduate level. Uh, and as every teacher knows, you don’t really understand the subject until you’ve tried to teach it. Right. You get sometimes as the experience where you’re busy explaining this had happened to me, even when I was teaching introductory psychology, I’m halfway through an explanation. And all of a sudden the little voice says, you know what you’re saying? Doesn’t make sense to you.  </p>



<p>Paul    00:13:13    That’s true. You really find out what you don’t know.  </p>



<p>Randy    00:13:16    Oh boy, this argument has just gone off the tracks.  </p>



<p>Paul    00:13:23    Well, this idea of the, um, the brain as a computing device among other things has dominated your thoughts for a few decades now, right.  </p>



<p>Randy    00:13:37    Since way back since  </p>



<p>Paul    00:13:39    Way back. Yeah.  </p>



<p>Randy    00:13:40    And so I was in graduate school at Yale, a very behavior in school, um, in Neil Miller’s land, uh, you know, he was hell’s most prominent student. Um, but I have, as I said, I had already become a heretic, uh, as an undergraduate. So, uh, I wasn’t buying it, uh, nor was I buying it when I took the advanced course and learning from Alan wagon. Meanwhile, I was building special purpose computers to run the experiments I was running. And I was reading the theory of computation and books on how computers worked and so on. And, uh, and Chomsky was coming along. I went to a talk, this guy, I barely heard of Noam Chomsky. He came to speak at year. Uh, and I had just been reading the stuff that Skinner and, uh, Osgood then written on language.  </p>



<p>Randy    00:14:40    I didn’t know anything by language, but I thought this is rubbish. Um, and so I went to hear this talk by John scan. I was an instant convert. Okay, this isn’t rubbish. So I, uh, I embraced the computational theory of mind. And I thought since those days that I’m in many of these days, most neuroscientists pay lip service, at least to it. Right. But many of them would immediately, and yes, I think abuse, but it doesn’t compete the way a computer can pitch. This is the story having studied, how computers compute and, uh, I mean, I’ve programmed all the way down to the machine level. So I know how it goes, what goes on under the hood and is on. And, uh, I’ve always thought, well, wait a second, there isn’t any other way to compute. I mean, tell me how it is you compete, but not the way computers can do it. Right. I thought, I thought during settled that  </p>



<p>Paul    00:15:47    Well, so, so I had a, a brain inspired listener question about Chomsky’s influence on you. So it really, you remember going to a talk and, and having that sort of solidify your approach.  </p>



<p>Randy    00:15:59    Oh yeah. I remember being very impressed and then I read his, well, it didn’t come out to later, but when it came out, I read his reflections on language. Also at Penn, Ben was a cognitive science was very much a happening thing at Penn. And I had colleagues like lilac Lightman and Henry Gladman and Duncan Lewis. So I was strongly influenced, uh, by, by them. Uh, and Dick nicer was on sabbatical there. The second year I was on an assistant professor. Um, so I was influenced by all of those people and all of those people were influenced by Chomsky. I mean, Johnson, Johnson, king sort of ran through the way we all thought. There’s a kind of interesting story about that. Some years later after I’d been publishing a bunch of stuff that isn’t quite a number of years later, um, no mum I’d met once or twice and who I’ve often corresponded with subsequently, but he wrote me a very polite letter. It’s a letter. I think this was before email, um, gently complaining that I was, uh, channeling him without ever citing him. And I was very embarrassed and I thought, you know, he’s absolutely right. So I wrote back apologizing and saying, look, you’re so much a part of the intellectual mill you in which I swim. I just didn’t occur to me though, acknowledged or even recognized my intellectual debts anyway.  </p>



<p>Paul    00:17:37    Interesting. So, okay. Well maybe we, maybe we can return to a Chomsky later, but because I know you wrote a manuscript in 2006, I believe where you acknowledged the reflections on language and how that also influenced you. But I assume you got the letter before 2006 because  </p>



<p>Randy    00:17:53    Oh yeah, for sure. I had, it was a long time ago.  </p>



<p>Paul    00:17:58    So, um, memory and the computational brain, of course you, um, you detail your ideas, uh, in that book, but you’ve also, you know, continued writing. Um, and you know, I, there’s a recent 2017 piece on the coding question where you re revisit these, these ideas and you you’ve continued to give talks about them. So maybe just in the broadest strokes, uh, could you summarize your, the idea and your, uh, position, um, and then we can kind of go through some of the details as needed.  </p>



<p>Randy    00:18:32    So computation is operations on symbols, um, right before the emergence of computing machines, symbols and representations, all those things were regarded as hand waves, right. Uh, but we’ve computing machines. So when someone said, well, what do you mean by similarly? So you see this bit registering, you see that pattern of ones and zeros that’s been put into that, the, those switches that’s the number eight. That’s what I mean by a symbol, right? There’s nothing even faintly mystical about it. It’s a, it’s a fundamental, um, in this sense, symbols are the stuff of con mutation where I’m using stuff in the physical sense, right. It’s there the material realization upon which computational operations operate. And, uh, once I got into information area, I realized, yeah, right. And an even better way of putting it. And this will became apparent in the book with that on thing that these symbols carry forward in time information in Shannon’s sense of the term.  </p>



<p>Randy    00:19:51    Um, so that you can quantify, right. You can say, this physically realized thing is carrying this amount of information. So you could wave a sign, all the fears about dualism and so on that tormented that behavior. So we’re all terrified by the specter of bills. Right. Um, and, uh, so what, as far as I’m concerned, the computers just put paid to those worries, right? Uh, we had a completely physical theory. It was, uh, I thought then, and still think gave you a viable theory of mind, get my, when I at Stanford and Yale and the behavior and stage, if you said, well, the rat expects to find food at the end of the runway, you can see there were saying, well, I don’t think we maybe should have been admitted. Um, somebody who is so soft headed as to talk about expectation,  </p>



<p>Paul    00:20:55    Because it was related to theory of mind, or  </p>



<p>Randy    00:20:59    Because before the appearance of computers, I mean, Skinner, denounced expectations in the most uncompromising berms is on the side of defect. Right? And so you couldn’t see them, you, you couldn’t feel them. They had no business in science. And, uh, and of course, as soon as you began programming computers, you would set up one number that was going to be compared to another number sentence. So then I just turn them and say, Hey, look, here’s my program. It runs on that machine. I don’t think there’s a ghost in that little computer. I built this number is what it expects. And this is the operation by which it compares another number to that to decide whether what it expects was actually the case and the story get off my back.  </p>



<p>Paul    00:21:53    Yeah. W but is that a, is that a redefinition of expectation over the years toward a more cause you know, the word expect one conjures, a notion of someone having a subjective feeling of expectation. Right. But now when someone says, expect, at least in the cognitive science, computational neuroscience world, all you think of is like a predictive processing, a numerical abstract process. Sure.  </p>



<p>Randy    00:22:19    Now these days where everybody’s talking about prediction error, they’re taking for granted that there’s an expectation in the terms in which I’m talking about it. I’m never worried about these phenomenological things, right? Like what does an expectation feel like? Not the kind of question I’m interested in. Why not? Because I don’t think it’s possible to get a hold of it in a strong form, for just the reasons you were finding out. Right. That is, all I need for expectation is what I just described. Right. And it’s perfectly clear and there’s no problem with it. And now that we have computing machines and we see this going on all the time, when people ask, well, does the computer feel the way I feel when I have an expectation? I think, I don’t know. And I don’t care. It’s not the kind of question I’m interested in. Right. In fact, if you notice what I’ve worked on almost entirely, particularly in recent years, uh, the last few decades, it’s what I call the psychophysics of the abstract quantities, uh, distance, duration, probability, numerosity, and so on — the quantities that are a fundamental part of our experience, but they have no qualia. Right? I mean,  </p>



<p>Paul    00:23:45    What’s that you said — qualia? Yeah. Oh,  </p>



<p>Randy    00:23:48    Well, I said it precisely to say that if you work on those things, you don’t worry about qualia because they have no qualia. If people say, what does a duration feel like? Right. So all the philosophers that are beating themselves up about, uh, you know, what’s it like to be a bat. Um, and they’re all worried about qualia as well. Well, that just isn’t something I worry about, uh, because first of all, I think the things that have qualia are of relatively minor interest. If you want to know what behavior is founded on, it is founded on the abstractions I was just talking about: the probabilities, the numbers, the durations, the directions, the distances, all, all these abstractions. They are what drive behavior all the way down to insects, right? As you probably know, I’m a huge fan of the insect navigation literature, right?  </p>



<p>Paul    00:24:49    You liked the bees, you liked the ants. He liked the, uh, the  </p>



<p>Randy    00:24:52    Butterflies, the beetle the butterfly had done because the dung beetles walking backwards with their ball though, walking home backwards,  </p>



<p>Paul    00:25:04    Maybe the central argument, or one of the central arguments, is that the story in neuroscience is that the numbers and the numerical abstract symbols — I should just say, symbols — are encoded in the synapses, right? In the connections between neurons, among populations of neurons. But you have a hard time believing that that could be the case.  </p>



<p>Randy    00:25:27    Well, actually, I, usually when I ask neuroscientists how you code a number, either in a synapse or however many synapses they think might be necessary, that’s a conversation stopper. Um, I don’t know if you ever viewed the YouTube of my talk at Harvard where, uh, John Lisman was discussing. And, uh, I posed that question at the end of my talk, saying, John, when you get up here, you’ll tell us how you store a number in a synapse. And he got up and gave a lengthy discussion in which he never brought that topic up. This was very unusual in that I got a rebuttal — I would get another chance to speak. And I said, John, I’m going to give you another chance.  </p>



<p>Randy    00:26:20    How do you store a number in a synapse? Come on, John. And the audience began to laugh and he stood up and he would not, he would not answer the question. Um, and I had a somewhat similar experience with Jean-Pierre Changeux much more recently. Uh, in fact, the question made him so angry that he wouldn’t allow the debate to be uploaded. You’re going to say, I didn’t see that one. So, and I’ve gone so far, often in my talks, I say, come on guys, I can offer you two alternatives. Uh, I mean, it’s not as if it’s impossible to think of an answer, like what I just said. And I often proceed to say, well, look, the synapse as usually conceptualized by computational neuroscientists is a real-valued variable, and distance, direction, probability — they’re all real-valued variables, right? So you can always represent a real-valued variable by a real-valued variable, right?  </p>



<p>Randy    00:27:21    So we could say, well, if the synapse is this — if the weight is this big, then the distance is that far. Right? And if the weight is this big… You want to go there? I found practically no one wants to go there. Oh, you don’t want to go there. Here’s a radically different alternative. Suppose we have a bank of — the people who talk about synaptic plasticity are very vague about how many states a synapse can assume, but one school of thought thinks they’re binary. All right, fine. There, I like that. That’s a switch. Okay. So we’ll have an array of binary synapses, and we throw this synapse to the one state and this synapse to the zero state. And now we’ve got something just like a computer register. You like that story? No, most people don’t like that story. All right, well, what’s your story? Uh, and at that point, all I get is hand waves.  </p>



<p>Randy    00:28:29    You know? Well, you see, there are lots of synapses and it’s a pattern that’s in the synapses. Well, could you say something about the pattern? I mean, how does the pattern for eleven differ from the pattern for three, for example? Could you shed a little light on that? People do not want to answer that question because the answer to that question is to admit that there are symbols in the brain. And even to this day, many people do not want to go there. And what’s your answer? My answer is that it isn’t in the synapses. I mean, I point out that there are several labs around the world that are busy studying how to use bacterial DNA as the memory elements in a conventional computer, right? Any engineer, anybody familiar with the computing machines that actually work, uh, and that we know how they work — once you show them a polynucleotide and explain that any nucleotide can be adjacent to any other nucleotide, any engineer worth his salt says, whoa, this could store like nobody’s business. In fact, one of the people who introduced me at that talk I gave a couple of years ago, in the introduction, showed a very grainy video of a running horse where the video, the entire video, had been passed through the bacterial DNA, just to drive home the fact that, that if you’re looking for a place to store numbers, well, uh,  </p>



<p>Paul    00:30:14    Well we know, yeah. We know DNA stores, the genetic code. Um, but there are other possibilities as well. I’m wondering what your current, so DNA is one possibility, right? Where a code could be stored intracellularly and to you, um, the key, I don’t know, I don’t know what your current thoughts on this, because it used to be that, uh, you didn’t know, um, that there were, you know, a handful of intracellular mechanisms whereby you might store these things, proteins degrade a little too fast, right. But then there are, polymerases like RNA, uh, could be one of the, uh, substrate DNA could be a substrate, but as DNA, fast enough, what’s your current thinking on what might be the, uh, substrate?  </p>



<p>Randy    00:30:58    Well, I’m still sticking with polynucleotides, though I lean much more strongly to RNA than to DNA, probably complexed with a protein that stabilizes it. My thinking has taken a huge boost lately from a wonderful paper by a young guy in Gaby Maimon’s lab at Rockefeller named Hessameddin Akhlaghpour. It’s just appeared in the Journal of Theoretical Biology in the last couple of weeks. Uh, and, uh, he’s an astonishing guy because he, he has a truly deep knowledge of, um, theoretical computer science, much deeper than mine. I mean, he really knows the lambda calculus, right. Whereas for me, it’s just kind of a name. Um, but at the same time, he really, he has a much deeper knowledge of RNA biology than I do. But the most astonishing thing is that — I mean, those two things are conceptually about as far apart as you can readily imagine.  </p>



<p>Randy    00:32:02    And, but he has this very rare mind that can bring those two things together. And he lays out a detailed story about computation performed at the RNA level, in which RNA is both the symbols and the machinery that operates on the symbols. And he builds it on the, uh, on the lambda calculus. And he lays out in his appendix, in great detail, an RNA machine that will add arbitrarily large numbers. Now, for all those computationalists out there in your audience, I claim that that has never been done by a CNN and that it never will be done, at least by a non-recurrent, straight-through CNN. And even if it’s done by a recurrent one, right, they’re going to resort to a little recycling, the, you know, cause they’re going to have to store — addition is inescapably serial, right?  </p>



<p>Randy    00:33:12    So you’ve got the, you’ve got to do the earlier, the less significant digits first. And you have to store that as a result and then transfer the carry to the next one and so on. So you need memory and uh, so how do you get memory? Well, that’s where recurrent nets come in, right? It keeps sending them around the loop. Which — uh, in this paper by Akhlaghpour that I recommend in the strongest terms, uh, he also has a wonderful discussion of dynamic systems and why they’re not stable, right? The very guy, Moore, who proved that they, that they were Turing complete, also argued very strongly that, uh, they weren’t stable. So they weren’t physically realizable — the Turing-complete ones were just kind of a mathematical dream and they weren’t physically stable.  </p>



<p>Paul    00:34:14    Well, you, you — I didn’t know about that more recent paper. You used to hang your hat on, and maybe you still do, uh, the Purkinje cell, uh, finding in the cerebellum. And maybe you’ll just add this more recent finding with RNA to your, um, uh, to your talks. Now  </p>



<p>Randy    00:34:33    You’re absolutely right. I mean, I, I still think Fredrik Johansson’s discovery — the development of that preparation, which was the culmination of a 40 year effort in Germund Hesslow’s lab — I still think that what he has done is hand the molecular biological community, uh, what they need on a platter. And for the first time, I think we could actually know the physical basis of memory while I’m still sentient. Uh, and, uh, that would be a miracle, because he’s identified the beginning and end of an intracellular cascade. And one of the steps in that cascade clearly contains the engram that encodes the CS–US interval. I think his PhD work proved that beyond reasonable argument. And, you know, molecular biologists know how to follow intracellular cascades, right? I mean, he identified the postsynaptic receptor at the start of this cascade, and this is a metabotropic receptor, right.  </p>



<p>Randy    00:35:37    Which means that it transfers the message from an extracellular source to an intracellular signaling chain. And, you know, there’s almost certainly a G protein on the inside of that membrane and that transforms and gooses the next thing. And so on. And molecular biologists have been chasing these cascades now for decades. And so, yeah, it’s always been, how would I know that I got to the engram? But Johansson has solved that problem for them, if only they realize it, because he proved that the information about the CS–US interval is not in the incoming signal that triggers this cascade. Right. But he also identified a potassium channel, an inward rectifying potassium channel, at the other end of the cascade, a channel that’s a key to producing the pause, the timed pause that comes out of the cell. Right. All right. So you’re following this cascade. And until you get to the engram, the information about the duration of the interval won’t be in any step you’re seeing. Right. And on the other side of the engram, the information will be in the chain, right. Uh, because it’s there by the time you get to this potassium channel. So you’re following the cascade and at some point — here, whoa, where does it go? Look at that, this step is informed by the interval. All right. So the engram lies between the preceding step and this step. Whoa.  </p>



<p>Paul    00:37:15    Yeah. But yeah, so, so there was, is the, is the more recent, uh, theoretical biology paper with the RNA? Uh, does it address the reading and writing mechanisms because that’s, that’s what you’d have to follow right. To address reading and writing?  </p>



<p>Randy    00:37:31    Well, keep in mind — in fact, I strongly suspect, if I can guess how things will play out, that we will discover the engram before we understand either the writing or the reading mechanisms. And again, I would appeal here to the history of, uh, of DNA, right? The engram is the low-hanging fruit because it has a really simple job. Its only job is to store the information, just like DNA’s job is to store the information. So we are still learning how that stored information gets translated into actual organisms. Right now we’ve made enormous progress in that. Uh, but there’s still a very long way to go. And this has been going on now for decades, right? 40, 50 years, ever since 1953. So the DNA story — that emerged pretty quickly, right. The basic, okay, here’s how the information is encoded. Here’s how it’s carried forward in time.  </p>



<p>Randy    00:38:39    The story about, uh, how it’s read is five orders of magnitude more complicated, right? I mean, you can explain DNA to a smart undergraduate in half an hour, right. Uh, if he then asks, or she then asks, uh, oh, okay, how do you get an eye? Then you say, well, okay, come to my advanced graduate seminar. And, uh, we will spend the whole seminar, um, discussing what we understand about how you get from a gene to an eye, right. One of the astonishing things we’ve learned is that there is one gene that — you know, there’s a gene, you turn it on, you get an eye wherever you turn it on. Right. When I was being taught biology, uh, we were being taught one gene, one protein, which is of course still true, but everyone took it to be a corollary that if you thought there could be a gene for an eye, you were stupid. No one could imagine — I mean, there was this huge gap between, okay, you got, you know, we’re coding for a sequence of amino acids, right. An eye isn’t a sequence of amino acids. Um, how? Now again, I would say the reason they couldn’t imagine how it’s done is they didn’t know enough computer science, because it turns out that the protein that that gene encodes isn’t a building block in the eye — it’s a transcription factor, right? It’s  </p>



<p>Paul    00:40:22    All transcription  </p>



<p>Randy    00:40:23    Factors. You have to go five or six steps down before you get past the transcription factors. Now, anybody who knows how relational databases work would say, well DOE or, or how a function works, right. When you, you know, when you call the name of a function in MATLAB that just accesses the code for that  </p>



<p>Paul    00:40:45    And on and on,  </p>



<p>Randy    00:40:48    That’s how you build complex operations out of simple operations. Right. And that’s what got the addition is all about.  </p>



<p>Paul    00:40:56    Let me, let me try this out on you, because I’m just thinking about this, uh, talking about the, I know you just said that the Reed and the Reed mechanism is orders of magnitude more complicated, and then the right mechanism must be even more complicated. I would imagine  </p>



<p>Randy    00:41:13    Until we know what the engram is, I think we — I refuse to think very long about this issue, because I think I don’t know what it is I need to know in order to think productively about it, because the write mechanism has to translate from an incoming code in the spike train. And since we still — despite the Rieke et al. book, which I worshiped and from which I learned my information theory; I have friends, even my collaborator, Peter Latham, who thinks that’s a great book, but I think that, well, it’s just about the fly sensor — is it the answer to how spike trains carry information, period? Right. It’s, it’s all in the interspike intervals. Well, and there’s several bits per interspike interval. Well, there’s no agreement about that. Right. So until there’s agreement about how the information is encoded in the incoming signal and agreement about how it’s encoded in the written thing, you can’t think productively about what the machinery could look like that would take this code and produce that code, any more than you could get from, um, DNA to homeobox genes, right, without knowing all the very complicated stuff that goes on in between, and then knowing how homeobox genes work, right. I mean, they code for abstractions — anterior, distal — it’s as if, uh, somebody went to an anatomy lesson back in the Precambrian, right. Yeah.  </p>



<p>Randy    00:42:58    And they said, well, we got a code, but here we got to have a code for, um, the end, but whatever it is, we’re building, we have to have another code for what anyway. You’ll get.  </p>



<p>Paul    00:43:09    Yeah. Well, well, let’s, let’s pretend for a moment just as a thought experiment let’s we don’t, it doesn’t have to be RNA, but there’s some in intracellular mechanism. Right. And, um, you just mentioned, uh, so this is going to be kind of a long winding, um, thought train here, but you, you had just mentioned, you know, about the receptors and how there is this, uh, enormously complex cascade from receptors to intracellular processes. And, um, that, that, anyway, that that’s a long cascade, you also mentioned convolutional neural networks in a derisive way, playfully derisive way. Um, however, thinking about a read-write mechanism. So you probably know that, um, you know, given a large enough, uh, neural network, that they are a universal function approximators right. They can transform from input to output and the mathematically proven that the universal function approximators talking about, uh, the, the cascade from, uh, extra cellular membrane protein to intracellular happenings, uh, sounds eerily like a neural network kind of process because you have all these interacting, uh, sub components. Right? The other thing, um, that you, you mentioned that we just talked about briefly is that the majority product from DNA from genes is recursive is transcription factors, which feeds back onto the DNA, which regulates the protein synthesis. And the next protein is another transcription factor. That sounds eerily like recurrent neural networks, right. Feeding back. So, so these, these processes are, um, uh, one could make a very loose argument that they are, oh, what’s the word? Not similar, not analog, uh, analogous in some fashion,  </p>



<p>Randy    00:45:07    They are analogous. They clearly are. Those analogies are traced out in the, um, Peter Sterling and Simon Laughlin book, in which they argue that computing with chemistry is cheaper. I think they’re spot on. To that I would add: ten orders of magnitude cheaper. Right? I think they don’t slam hard enough on just how much cheaper. Um, but they do these dynamic systems, uh, analogs. Now this same guy has a brand new preprint — I just got, I just saw it yesterday or the day before yesterday — in which he takes up that proof of a universal function approximator. And it shows, first of all, that it’s not really true — it’s only true on the closed interval, not the open interval. So, but second of all, he, he revisits the argument. And so all the processes that you’re describing are dynamic systems, and he revisits why you can’t really do computation with stored information with dynamic systems.  </p>



<p>Randy    00:46:21    He has a much more sophisticated technique on this, uh, um, take on it anchored in a much deeper understanding of the foundations of theoretical, theoretical computer science. But my much simpler I can move to. I know he agrees with me, um, is like those proofs said, well, what do we mean by a universal, uh, by a function approximately a function approximator gets an input vector and it generates an output vector. Oh, okay. Uh, that’s the way a mathematician thinks about it, but it shows how, not the way a computer scientist thinks about it. Um, because there’s no memory in that. Right. And a computer scientist is very aware that in your average computation information comes in, some of which was acquired 50 years ago as we sit here talking, right. Uh, as I’m summoning up the words in the English language, right. I learned most of them, uh, when I was, uh, less than five years old. Right. It’s, uh, they’ve been rattling around in there and now for 75 years,  </p>



<p>Paul    00:47:29    However, now I’m forgetting many of them.  </p>



<p>Randy    00:47:32    It doesn’t get better. Let me dang it. I’m beginning to have noticeable word finding problems and someone whose verbal facility was all as one of their great strengths. That’s very painful. And I’m sorry, I couldn’t the other day I was explaining something and I couldn’t someone, the word factorial. I was, I wanted to say the Sterling approximation. I couldn’t say what it was an approximation to because I couldn’t return to the word factorial. Oh, geez. Anyway, um, the point is that real-world computations require memory because you get one bit of information, you put it in memory, you get another bit, maybe 10 days later, maybe a year later, maybe 20 years later, you put that memory. And so if you look at most of what we do, it’s putting together at a given moment information that was acquired at many different times in the past.  </p>



<p>Randy    00:48:35    And that’s what brains when you’re talking about real. So I hope it’s clear why this makes that proof totally irrelevant, right? Because that proof assumed that all that information had been very thoughtfully assembled for you by some genius and packaged into one humongous vector. And that we fed it to the computer in a generator in the neural net and then generated an output vector. Well, of course, that’s where you have to think about the system and when it has no memory, but that’s of course just why in throwing out the memory, they threw out the baby with the bath, right?  </p>



<p>Paul    00:49:15    Well, the memory would be in the distributed connections, right? The distributed weight,  </p>



<p>Randy    00:49:20    That’s a finite state machine. And the proper definition of a finite state machine — which, note, is this: a finite state machine is a Turing machine that cannot read what it has written. Okay. That is mathematically equivalent to the usual definition. Um, but if you’re thinking about these things, it shows you the huge difference between, um, a Turing machine and a finite state machine.  </p>



<p>Paul    00:49:48    Um, it can only, it can only go from state to state with some transition rule and probability in, so,  </p>



<p>Randy    00:49:54    And we hammer on this a bit. So if your iPhone or your mobile phone with its camera were a finite state machine, then it would have stored in its wiring diagram every picture that you’re ever going to take with that phone. I don’t think so. You can take more different pictures with that phone than there are elementary particles in the knowable universe, right. That’s my definition of a true infinity. Right. Okay. So we didn’t put all the possible pictures in the wiring diagram of that phone. Right. We put in something that would convert photon catches to switch throws, to memory elements. And of course, then the phone immediately gets busy running some compression algorithm, um, because there’s huge redundancy in the pixels, right? So, uh, uh, but a device without memory can’t do any of that, right? No, no memory, no iPhone.  </p>



<p>Paul    00:51:11    So just stepping back, because often on this podcast, we talk about the current successes of the deep learning, um, folks. And a lot of that is being applied to neuroscience to understand, uh, how brains function. And I know that you are aware of, um, the, that the line of deep learning wherein from like Alex Graves and so on where external memory has been supplied to the neural network. Um, but the book memory, uh, and the computational brain was actually written before the quote unquote deep learning revolution when, uh, deep learning started to dominate. So, um, for fear that, uh, this diatribe could take the rest of the time, keep it short. I’m curious about your thoughts on the, uh, success and the ubiquity now of, of deep learning, uh, and its application to understanding how neuroscience, how brains might function.  </p>



<p>Randy    00:52:12    Well, trying to keep it short. You remember them, you don’t  </p>



<p>Paul    00:52:16    Have to keep it, sorry, but I, you know, we  </p>



<p>Randy    00:52:18    All have us lie line from the graduate plastics. Well, my, my, uh, wisdom distilled down to a very few words would be adversarial images.  </p>



<p>Paul    00:52:31    Sure. But what happens when that gets solved, but okay, well, well,  </p>



<p>Randy    00:52:35    Yeah,  </p>



<p>Paul    00:52:37    Well, I am definitely.  </p>



<p>Randy    00:52:40    Yeah. So the last time I checked, no solution was in sight. And it reflects a deep truth about how those systems work. Right? Most people don’t realize that when the, um, image recognition system inside Elon Musk’s car warns the rest of the system that there’s a stop sign there, right, that system — because it’s a deep neural net, uh, and because they don’t know how to extract shape — what it has really decided is, look, these pixels are stop signage. This region of the pixels has the statistics of a stop sign, right? If you were to ask, well, is it octagonal, then it would respond: what’s an octagon?  </p>



<p>Randy    00:53:33    And if you explained what an octagon is, the net, the net would say, look, I don’t do shape. Uh, and at least I have noticed, and I think others will have noticed, that the hype about how we were going to have auto self-driving cars has died down very considerably, because the adversarial images taught the malevolent but smart high school students, of which there is a great supply, how to go out and hack every stop sign in town, right? Uh, with, uh — you get yourself some tape, you get a can, and you make various graffiti on the stop signs. And Elon Musk’s cars will blow right through this nonsense. Uh, okay. So, uh, hey guys, I think it’s wonderful that you, uh, got the system to work to the point where you could do that — I’m not discounting this achievement — but when you start telling me this is how the brain works, and that means the brain has no memory, I say, I don’t think so, because you can’t do deep learning. I taught Jay McClelland years ago and he and I have been arguing ever since,  </p>



<p>Paul    00:54:50    Oh, he’s one of, he’s one of the ones who’s working on, building math and reasoning into,  </p>



<p>Randy    00:54:55    I keep telling him, hey Jay, forget math and reasoning. Look, the ant and the bee do dead reckoning. Why don’t you try that? Uh, I want to see how dead reckoning works in a system that has no memory. I’ve been taunting — I’ve been trolling him with that challenge now for 20 years. And, uh, he doesn’t bite. Uh, cause I think, like anyone, you look at dead reckoning and say, whoa, uh, we are going to have to store the current location. Right. I mean, there’s no way of getting around it. Uh, and that’s going to extend over hours.  </p>



<p>Paul    00:55:28    Yeah. Well, and yet, okay. So over hours as a point, you might, um, bring up again here because I wanted to ask you, first of all, whether you’re aware of, and then secondly, your thoughts on, um, there, there are, there have been, uh, deep learning networks paired with reinforcement learning techniques in the AI world that have used convolutional neural networks and used LSTs that have done path integration in little maze environments, virtual amaze environments. And that’s not  </p>



<p>Randy    00:56:00    Toy environments in which tile, the maze c’mon in order to make it fit into the reinforcement learning thing. They say, well, look, here’s how we represent the maze. Right? You see this tie that we tile it, right? And then each tile knows, well, then it gets interesting. Uh, I think very few of them actually give the tiles metric information. That is, um, I know that the, a star algorithm, which is how the Google maps finds, uh, routes of course has metric information, right? It’s all, it’s all there and the cost function. Uh, so that’s why Google send around cars with GPS is right there. Record extremely precise metric information all over, uh, all over the world. But in the ones that I’ve seen, that the reinforcement learning ones, they, uh, you know, reinforcement learning, they say, well, when you’re in state one, you do you learn to do action one.  </p>



<p>Randy    00:57:08    And when you’re in state two, you learn to do action two. First of all, they don’t seem to realize that this is essentially identical to Clark Hull’s theory. Uh, that’s why I say, hey, I was listening to this nonsense 60 years ago. You know, they don’t put in any metric information. Come on, I’m a sailor, I’m a navigator, I’m a back country skier. I ski alone in the back country. Hey, uh, you know, you don’t tile the winter wilderness. You say, okay, I’m headed this way, the sun’s over there. Uh, you work the way navigation has always worked. What direction am I going? How far am I going?  </p>



<p>Paul    00:57:55    However, the average human is easily lost in that scenario. Whereas the average bee or ant, uh, isn’t isn’t lost, right.  </p>



<p>Randy    00:58:03    Well, there are plenty of the other ones aren’t lost either. I’m by no means the only one who does back country skiing, even alone. And of course, uh, Joshua Slocum sailed alone around the world, right? Uh, um, using totally traditional navigational, uh, methods, boasting with some good reason about his accomplishments. But, uh, the, the reason people don’t know how to do this in the modern world is they always live in cities and they get from one place to another on taxi cabs on subways. So they’re never, they’re never called upon to do this. But when I was in, uh, college, I worked one summer for a month until I turned them into the better business bureau with a Collier’s encyclopedia selling encyclopedias door to door under the tutorship of a, of a man who had been doing this all his life. And, uh, in those days you sold these in the newly built suburbs, which had all these twisty roads and cul-de-sacs.  </p>



<p>Randy    00:59:07    And so, and then came in and you went all around and so on. And this guy was intensely proud of the fact that he always knew exactly how to get back out of there. And we would be driving her out. I’d be totally lost. And he’d say, which way is the entrance? No idea. He would point it at it. He knew which way it was to the entrance within ten degrees, no matter how long we’d been in there. So it’s a matter of somewhat of talent. Some people have more talent for it, but it’s also a matter of habit, right? I mean, if you walked alone in strange foreign cities, maybe the first time you got seriously lost, but you learn something from it. Now when you leave the hotel and you walk down and you get to the first corner, you have turned around. In fact, it’s just what the bees do when they leave, they turn around and look back.  </p>



<p>Paul    01:00:02    But the fact that we can basically unlearn that skill and we would have, you you’d have to learn it back, right. Uh, argues it could argue multiple different things. You know, the question that I want to ask you is if you think that there could be multiple memory mechanisms, you know, obviously the quote-unquote memory, um, there are multiple types of memory defined and that is continuing to change. What kinds of memory that we have. So, you know, for example, something like episodic memory, where you can recall an event, right? And I know that you don’t care about, uh, mental phenomenon, uh,  </p>



<p>Randy    01:00:38    Oh, on episodic memory. Uh, Crystal, Jonathan Crystal has demonstrated it beautifully in rats and, and of course, uh, um, Nicky Clayton and, uh, Tony Dickinson demonstrated it spectacularly in those food-caching birds. Right,  </p>



<p>Paul    01:00:56    Right. On  </p>



<p>Randy    01:00:57    Board with episodic memory, but it’s all numbers,  </p>



<p>Paul    01:01:01    Right? So I’m thinking more of  </p>



<p>Randy    01:01:03    The right amount, uh, texture, uh, what goes in one episode, right. Numbers.  </p>



<p>Paul    01:01:13    So to your mind, there is one memory mechanism, uh, in all brains.  </p>



<p>Randy    01:01:19    That’s what I think is by far the most likely, uh, of course, I don’t know. And of course it’s conceivable that there are different learning mechanisms, but once you grasp how simple and how fundamental memory is memory understood the way I understand it. Right. Which is just, Shannon’s memory is a medium of communication. It’s, um, machinery, the medium, the material stuff, by which the past communicates with the future. Now Shannon in his opening paragraph pointed out that, Hey, look, if you’re inter if communication is what you’re about, and he might’ve stood up and said, I’m a communication engineer, and they’d pay me here at bell labs for communication. If communication is what it’s about, you don’t give a shit about the content. That was a truly profound insight. And I don’t see why that doesn’t apply just as much to the brain as it does to, to computing this.  </p>



<p>Randy    01:02:26    Right. When I go buy a new stick of gum to save a terabyte of information by they don’t ask me, well, are you going to use this for word processing or spreadsheets or MATLAB files? It’s all just information when it comes to communication and memory is, is communication. So it’s a really, I think DNA is, again, look, evolution solved this problem. Once it found a really good solution, that was a, probably 2 billion years ago. It same as the animals have been navigating since the Precambrian we can tell just from their tracks in the mud. Right. So, um, you can navigate without a map, without a memory, all these. So in one of your other questions you asked about how about skills, right? Motor skills and motor skills. Yeah. If it’s going, if there is going to be a case where it’s different than I would say, well, that could easily be where, but I kind of doubt it because I think skills can be, and I’m a student of the motor literature I’ve written about it at some length occasionally. Um, I think skills can be learned as parameter tuning that is, you’ve got, you’ve got a system that’s an incredibly versatile memory system. This stuff was all in my first book and the  </p>



<p>Paul    01:03:55    Best the organization of learning.  </p>



<p>Randy    01:03:57    No, The Organization of Action. There’s another book 10 years before that. Um, but I, and what I’m saying is this is not original with me. This is very much there in the literature that was in that book. And, and it’s there in, say, Eve Marder’s work, right, with the stomatogastric ganglion, right? You’ve got this set of oscillators, a very simple circuit, right. But there’s oscillators. And some feedback, feedback is important. Don’t get me wrong, but only under circumstance in certain circumstances. And there’s a, of course, inputs, inhibitory inputs, and what have you. But the way the system is basically controlled is by, um, signals that come down from higher in the nervous system and adjust the parameters. Right? So, and parameters, we’re back to numbers, right? What are parameters? They are numbers.  </p>



<p>Paul    01:04:59    So I have a memory from, well, I don’t know if it was three or four years ago. So my memory for times is not great, but, uh, we held my wife and I, uh, held a chili cook-off at our house. And, um, I won’t tell you how my entry did. I didn’t tell you I didn’t win the trophy, but, um, there was a particular entry, uh, that tasted a lot w the flavor was dominated by celery. Um, and I remember this and I think it got last place. It was just overwhelming celery. Uh she’s she’s, uh, she was a vegetarian and a kind of a whole holistic medicine also person. But anyway, I was talking to her about it the other day. And I can remember that, uh, she felt a little, um, you know, sad about this, but, but I, but I, but I have this episodic memory and we don’t need to go on about episodic memory. I have this, you know, experiential memory of what that was like, and the flavor of the celery and me not winning also, you know, and all that kind of, um, and I can picture our house and stuff. So I guess the question is, does the new, the intracellular numerical mechanism, uh, account for that type of experiential memory?  </p>



<p>Randy    01:06:11    Well, not without some spelling out of additional hypotheses. So, but I did address your question at considerable length in the near final chapter of my 1990 book entitled The Unity of Remembered Experience. That book has been cited many thousands of times, but as near as I can tell, no one ever read that chapter, if they did, they dismissed it, uh, because it, it, it addresses exactly the question you’re posing, uh, how do these diverse aspects of an experience and the experience extends over time and space and involves many different components, the taste of the celery and so on. How do they all get knit together? And I argue there that first of all, they’re not knit together by associations because that brings you into an explosion, right? You have a combinatorial explosion, you’d get this net of innumerably many associations, the unity, the phenomenological unity, uh, arises in the process of recollecting the experience and that you use time and place indices, all memories on this story have a timestamp and a location stamp.  </p>



<p>Randy    01:07:29    And I present, I review experimental evidence for that claim. And this is now of course, 30 years old. Uh, and there are more evidence for it. Have a somewhat similar nature has emerged, uh, with, uh, in the intervening 30 years. But I spell out in some detail how you could use if every memory, if they’re all in separate bins, in separate neurons and so on, but they all have as one of their fundamental components, a time and a location stamp, which plays the role of the, um, the operon in, uh, DNA, right? It’s the address. Then you can move, uh, among these memories in recollecting an experience that is you because the episodes are always located in at a time and in a place they’re located in space time. Right. And so you can retrieve the facts if using those indices is I read the hippocampus literature, then they’re outside. I think, uh, I see, I think someday they’re somewhat, well, I actually, I can, I come down and the guy who died, Howard  </p>



<p>Paul    01:08:47    Howard,  </p>



<p>Randy    01:08:47    He was starting to argue this same sort of thing. And I wrote him, I said, Hey, Howard, go read my chapter. I, this is what I was doing 30 years ago. And he wrote back and he said, yeah, I’ve been reading it. You’re right. You were a guy that’s outside, you’re in the, in the future. And then he died  </p>



<p>Paul    01:09:08    Totally aside. But this happens over and over. And you’ve been around long enough to have experienced this personally where new ideas are not new ideas they’ve been written about in buried in chapters. Uh, um, so how many times this has happened to you?  </p>



<p>Randy    01:09:24    Oh, I don’t get uptight about it for one thing as a, because I’m a sinner. I both listened to her and sinned against us witness that he is. And, uh, and I, I w I wasn’t bang. Right. Just, I thought Howard and I could make common cause here. Right. And I was deeply disappointed in what he does.  </p>



<p>Paul    01:09:47    You got to stay alive to keep doing science. You got to  </p>



<p>Randy    01:09:50    Stay with me here. It’s not, I think that’s the general answer. I mean, take celery for a moment for a specific, so it has qualities of taste, but then the vote is color, right? And there’s one thing we’ve known now for more than a century color is represented in our brains with three numbers. And, uh, recently the story for both taste and odor has emerged the same. They’re all vector representations. The dimensionality of the spaces is higher, but these days, Doris Tsao and lots of people are pushing vector representations really hard. And of course vectors, they’re just strings of numbers, right. And, and they represent faces and, and, um, Chuck Stevens has argued that the same story is true for odor even in Drosophila. So again, the celery, it’s all numbers, right? It’s a, the tastes are represented in a four-dimensional space. Colors are represented in a three-dimensional space. Faces are represented in a 50 dimensional space. You can get the idea  </p>



<p>Paul    01:10:56    Two more questions, and then I’ll let you go. And I appreciate you hanging around with me. One, what is the role of synaptic plasticity?  </p>



<p>Randy    01:11:05    Um, no one knows the least of all me.  </p>



<p>Paul    01:11:09    I thought I assumed that you were going to say in coding writing.  </p>



<p>Randy    01:11:14    I honestly have no idea. I, since I literally believe that an associative bond never formed in the brain of any animal and since the plastic synapse is transparently conceived of as an associative bond. Right. I certainly don’t think that’s what they are. Could they play a role in the computations carried out on signals? Sure. Do I have, it seems likely that they probably do, but I do I have any good ideas what that role might be? No. Um, does anyone else? I don’t know. I don’t follow the literature very carefully, but everybody seems so hung up on the idea that there are associative bonds that I think until they dig themselves out of that conceptual hole, they’re never gonna find out what they’re really about.  </p>



<p>Paul    01:12:05    What’s keeping you up at night, these days. What are you thinking hard about? That’s just beyond reach to you.  </p>



<p>Randy    01:12:17    Well, how’d it get the molecular biologist to realize that Fredrico Hudson has offered them the world on a plate.  </p>



<p>Paul    01:12:30    How’s that fight going  </p>



<p>Randy    01:12:32    Very slowly, and they’re hung up for what, as best I can make out, are quasi-metaphysical reasons. So for example, Tomás Ryan, who,  </p>



<p>Paul    01:12:44    Uh, he’ll be, he’ll be on the next episode.  </p>



<p>Randy    01:12:47    So you can follow up on this or you can ask him what’s his problem with Randy’s story? Yes, because he and I have been arguing in correspondence. He, I never heard of him. I, I had given talks at MIT where I imagined he was present. And, uh, and I met Tonegawa a few times, but, um, whose lab he came out of, but he emailed me, uh, the day his Science paper was embargoed, when he was still in Tonegawa’s lab, showing that they could make the plastic synapses go away. And the information was still there. And the email said, I think you’ll find this interesting. And I wrote back, yes. I find this very, very interesting indeed. Okay. So he, and I agree that the information isn’t stored in the, in the plastic synapses, and he admits that he does not have a story about how the information is stored  </p>



<p>Paul    01:13:44    The engram, but  </p>



<p>Randy    01:13:46    He’s all focused on these cell assemblies, he’s focused on this sparse coding. And I say, yeah, Tomás, Tomás, that’s all very interesting, but we both think that the real name of the game is looking for the engram, and those cell assemblies, they aren’t it, uh, your own work shows that it must be inside those cells. I can’t get them to go out and it’s all hung up about information. He doesn’t like the idea that we have to think in terms of Shannon information, he’s read Dennett. And he, he believes that there is semantic information. And I, I know Dan very well. We have a lengthy email correspondence in which I’m trolling Dennett, Daniel, and saying, Daniel, the fact is you have no idea what you mean by semantic information. And then he more or less admits that that’s true. I said, you know, Shannon information is the only game in town. Uh, semantic information is just philosophers hand-waving  </p>



<p>Paul    01:14:51    So, but, but, but the recent optogenetic work where, um, you know, particular cells and networks of cells,  </p>



<p>Randy    01:14:58    Then they can excite behavior that is informed by the store information. They’ve shown that over and over again, and now people are showing it in the hippocampus, right?  </p>



<p>Paul    01:15:08    But that doesn’t change your story. It doesn’t change your view  </p>



<p>Randy    01:15:11    Because it doesn’t even address the question I’m posing, which is alright, you excite those cells. And the output signals from that cell is informed by acquired information. Where is it? Did some neighboring cells say, oh, you need to know this, right? Uh, or as your own experiments tend to show, they got that information from inside themselves. Well, once you get inside a cell, it’s all molecules, right? Uh, very big, complicated molecules and  </p>



<p>Paul    01:15:52    Networks of molecules,  </p>



<p>Randy    01:15:54    Even railroads and structures build them in the ribosome, for example. But, but basically we’re down to the molecular level of structure. Right. And, uh, and, uh, it keeps saying your own work shows that that’s the case. I cannot persuade him and it’s just driving me nuts. I mean, uh, Rick is a five or six years old now and I thought, oh, wow, this is the breakthrough. Now all those, uh, insanely ambitious molecular biologists, they’ll jump on this. And they’ll trace that cascade. And they’ll use this ability to, to observe single molecules fluorescing inside individual cells. I mean, they’ve created the most astonishing tools. And once they get to the end, they can slice and dice it with CRISPR and so on. And they can find out that code. It seems to me like, this is so arduous. I cannot,  </p>



<p>Paul    01:16:55    You’ve learned multiple things throughout your career. Why don’t you just go learn molecular experimental, molecular biology and start on it.  </p>



<p>Randy    01:17:07    And, uh, you know, it takes long time to become a molecular biologist. And besides that, I would have to get a grant. And so, I mean, that’s the other thing, the, uh, and then there are no way with somebody with my background could get a grant. I mean, this effort, although it seems to me obvious what the general strategy is. I don’t mean to minimize how difficult it would be and the kind of resources. I mean, you need the kind of money that only a molecular biologist can get. I mean, people like me, we get the, the, the rounding error in the molecular biology grants, right. Uh, so you’re not gonna pursue that cascade with a $20,000 a year, right. It’s going to be more like $5 million a year. Right. And it needs to become competitive, which it always does in microbiology. That is if one or two of the smartest young upstarts started doing this, then the rest of the field would say, oh shit, maybe I’m missing the train. Uh, maybe, maybe I better get on that train before it leaves the station. Right. I’m trying to stir up that kind of anxiety. But so far I have not succeeded.  </p>



<p>Paul    01:18:21    Well, you’ve been driving your train for a long time along those, those very tracks. So this is a great place to leave it. I’m going to I’ll play that last little clip there for Tomas when we talk perhaps, and now can respond. Thank you for the very, very fun, uh, conversation. Keep up the good fight, Randy. I appreciate it. I  </p>



<p>Randy    01:18:40    Enjoyed this. Thank you.  </p>



<p></p>

</div></div>


<p>0:00 – Intro<br />6:50 – Cognitive science vs. computational neuroscience<br />13:23 – Brain as computing device<br />15:45 – Noam Chomsky’s influence<br />17:58 – Memory must be stored within cells<br />30:58 – Theoretical support for the idea<br />34:15 – Cerebellum evidence supporting the idea<br />40:56 – What is the write mechanism?<br />51:11 – Thoughts on deep learning<br />1:00:02 – Multiple memory mechanisms?<br />1:10:56 – The role of plasticity<br />1:12:06 – Trying to convince molecular biologists</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/3946e667-955a-4940-956f-56df58bb21ae-126-Randy-Gallistel-public.mp3" length="77062878"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Randy and I discuss his long-standing interest in how the brain stores information to compute. That is, where is the engram, the physical trace of memory in the brain? Modern neuroscience is dominated by the view that memories are stored among synaptic connections in populations of neurons. Randy believes a more reasonable and reliable way to store abstract symbols, like numbers, is to write them into code within individual neurons. Thus, the spiking code, whatever it is, functions to write and read memories into and out of intracellular substrates, like polynucleotides (DNA, RNA, e.g.). He lays out his case in detail in his book with Adam King, Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience. We also talk about some research and theoretical work since then that support his views.







Randy’s Rutger’s website.Book:Memory and the Computational Brain: Why Cognitive Science will Transform Neuroscience.Related papers:The theoretical RNA paper Randy mentions: An RNA-based theory of natural universal computation.Evidence for intracellular engram in cerebellum: Memory trace and timing mechanism localized to cerebellar Purkinje cells.The exchange between Randy and John Lisman.The blog post Randy mentions about Universal function approximation:The Truth About the [Not So] Universal Approximation Theorem


Transcript

Randy    00:00:03    Usually when I ask neuroscientists how you encode a number either in a synapse or however many synapses they think might be necessary. That’s a conversation stopper. All I get is hand waves, you know? Well, you see, there are lots of synapses and it’s a pattern. It’s an absence. Well, could you say something about the pattern? I mean, how does the pattern for 11 differ from the pattern for three, for example, can you shed a little light on that? You do not want to answer that question. The end is the low-h...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:19:57</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 125 Doris Tsao, Tony Zador, Blake Richards: NAISys]]>
                </title>
                <pubDate>Wed, 19 Jan 2022 23:19:34 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-125-doris-tsao-tony-zador-blake-richards-naisys</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-125-doris-tsao-tony-zador-blake-richards-naisys</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/01/art-125-01.jpg" alt="" class="wp-image-1626" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<p>Doris, Tony, and Blake are the organizers for this year’s NAISys conference, <a href="https://meetings.cshl.edu/meetings.aspx?meet=NAISYS&amp;year=22">From Neuroscience to Artificially Intelligent Systems (NAISys)</a>, at Cold Spring Harbor. We discuss the conference itself, some history of the neuroscience and AI interface, their current research interests, and a handful of topics around evolution, innateness, development, learning, and the current and future prospects for using neuroscience to inspire new ideas in artificial intelligence.</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="768" height="768" src="https://braininspired.co/wp-content/uploads/2022/01/Tsao_hires_pic_RESIZE-768x768-1.jpg" alt="" class="wp-image-1628" /></li><li class="blocks-gallery-item"><img width="250" height="250" src="https://braininspired.co/wp-content/uploads/2022/01/ZadorxGerber.jpg" alt="" class="wp-image-1629" /></li><li class="blocks-gallery-item"><img width="1024" height="1006" src="https://braininspired.co/wp-content/uploads/2022/01/EmbeddedImage-1024x1006.jpg" alt="" class="wp-image-1627" /></li></ul>



<ul><li><a href="https://meetings.cshl.edu/meetings.aspx?meet=NAISYS&amp;year=22">From Neuroscience to Artificially Intelligent Systems (NAISys)</a>.</li><li>Doris:<ul><li><a href="https://twitter.com/doristsao">@doristsao</a>.</li><li><a href="https://www.tsaolab.caltech.edu/">Tsao Lab</a>.</li><li><a href="https://www.nature.com/articles/s41467-021-26751-5.pdf">Unsupervised deep learning identifies semantic disentanglement in single inferotemporal face patch neurons</a>.</li></ul></li><li>Tony:<ul><li><a href="https://twitter.com/TonyZador">@TonyZador</a>.</li><li><a href="http://zadorlab.labsites.cshl.edu/">Zador Lab</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/582643v1">A Critique of Pure Learning: What Artificial Neural Networks can Learn from Animal Brains</a>.</li></ul></li><li>Blake:<ul><li><a href="https://twitter.com/tyrell_turing">@tyrell_turing</a>.</li><li><a href="https://sites.google.com/mila.quebec/linc-lab/home">The Learning in Neural Circuits Lab</a>.</li><li><a href="https://proceedings.neurips.cc/paper/2021/file/d384dec9f5f7a64a36b5c8f03b8a6d92-Paper.pdf">The functional specialization of visual cortex emerges from training parallel pathways with self-supervised predictive learning.</a></li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Tony    00:00:04    Goal of the conferences to bring machine learning and computational neuroscience back together. Again, a lot of the, uh, major insight in deep learning and artificial intelligence came from neuroscience. In fact, you could, you could basically say that almost all of them.  </p>



<p>Blake    00:00:26    There has been a lot of interest in the computational neuroscience community in bringing machine learning and AI back on board, but the other direction has yet to be fully recouped. So that direction of taking inspiration from the brain to build better AI systems is precisely the gap that I think we wanted to fill with this conference. And which is arguably still a gap.  </p>



<p>Doris    00:00:53    I mean, I, I feel like this, this neuro AI is as fundamental as, you know, physics or chemistry. It’s, you know, the study of intelligence perceptional, these, you know, there there’s certainly like fads, I mean, in, in how you analyze data using...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.







Doris, Tony, and Blake are the organizers for this year’s NAISys conference, From Neuroscience to Artificially Intelligent Systems (NAISys), at Cold Spring Harbor. We discuss the conference itself, some history of the neuroscience and AI interface, their current research interests, and a handful of topics around evolution, innateness, development, learning, and the current and future prospects for using neuroscience to inspire new ideas in artificial intelligence.







From Neuroscience to Artificially Intelligent Systems (NAISys).Doris:@doristsao.Tsao Lab.Unsupervised deep learning identifies semantic disentanglement in single inferotemporal face patch neurons.Tony:@TonyZador.Zador Lab.A Critique of Pure Learning: What Artificial Neural Networks can Learn from Animal Brains.Blake:@tyrell_turing.The Learning in Neural Circuits Lab.The functional specialization of visual cortex emerges from training parallel pathways with self-supervised predictive learning.


Transcript

Tony    00:00:04    Goal of the conferences to bring machine learning and computational neuroscience back together. Again, a lot of the, uh, major insight in deep learning and artificial intelligence came from neuroscience. In fact, you could, you could basically say that almost all of them.  



Blake    00:00:26    There has been a lot of interest in the computational neuroscience community in bringing machine learning and AI back on board, but the other direction has yet to be fully recouped. So that direction of taking inspiration from the brain to build better AI systems is precisely the gap that I think we wanted to fill with this conference. And which is arguably still a gap.  



Doris    00:00:53    I mean, I, I feel like this, this neuro AI is as fundamental as, you know, physics or chemistry. It’s, you know, the study of intelligence perceptional, these, you know, there there’s certainly like fads, I mean, in, in how you analyze data using...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 125 Doris Tsao, Tony Zador, Blake Richards: NAISys]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/01/art-125-01.jpg" alt="" class="wp-image-1626" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<p>Doris, Tony, and Blake are the organizers for this year’s NAISys conference, <a href="https://meetings.cshl.edu/meetings.aspx?meet=NAISYS&amp;year=22">From Neuroscience to Artificially Intelligent Systems (NAISys)</a>, at Cold Spring Harbor. We discuss the conference itself, some history of the neuroscience and AI interface, their current research interests, and a handful of topics around evolution, innateness, development, learning, and the current and future prospects for using neuroscience to inspire new ideas in artificial intelligence.</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="768" height="768" src="https://braininspired.co/wp-content/uploads/2022/01/Tsao_hires_pic_RESIZE-768x768-1.jpg" alt="" class="wp-image-1628" /></li><li class="blocks-gallery-item"><img width="250" height="250" src="https://braininspired.co/wp-content/uploads/2022/01/ZadorxGerber.jpg" alt="" class="wp-image-1629" /></li><li class="blocks-gallery-item"><img width="1024" height="1006" src="https://braininspired.co/wp-content/uploads/2022/01/EmbeddedImage-1024x1006.jpg" alt="" class="wp-image-1627" /></li></ul>



<ul><li><a href="https://meetings.cshl.edu/meetings.aspx?meet=NAISYS&amp;year=22">From Neuroscience to Artificially Intelligent Systems (NAISys)</a>.</li><li>Doris:<ul><li><a href="https://twitter.com/doristsao">@doristsao</a>.</li><li><a href="https://www.tsaolab.caltech.edu/">Tsao Lab</a>.</li><li><a href="https://www.nature.com/articles/s41467-021-26751-5.pdf">Unsupervised deep learning identifies semantic disentanglement in single inferotemporal face patch neurons</a>.</li></ul></li><li>Tony:<ul><li><a href="https://twitter.com/TonyZador">@TonyZador</a>.</li><li><a href="http://zadorlab.labsites.cshl.edu/">Zador Lab</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/582643v1">A Critique of Pure Learning: What Artificial Neural Networks can Learn from Animal Brains</a>.</li></ul></li><li>Blake:<ul><li><a href="https://twitter.com/tyrell_turing">@tyrell_turing</a>.</li><li><a href="https://sites.google.com/mila.quebec/linc-lab/home">The Learning in Neural Circuits Lab</a>.</li><li><a href="https://proceedings.neurips.cc/paper/2021/file/d384dec9f5f7a64a36b5c8f03b8a6d92-Paper.pdf">The functional specialization of visual cortex emerges from training parallel pathways with self-supervised predictive learning.</a></li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Tony    00:00:04    Goal of the conference is to bring machine learning and computational neuroscience back together again. A lot of the, uh, major insight in deep learning and artificial intelligence came from neuroscience. In fact, you could, you could basically say that almost all of them.  </p>



<p>Blake    00:00:26    There has been a lot of interest in the computational neuroscience community in bringing machine learning and AI back on board, but the other direction has yet to be fully recouped. So that direction of taking inspiration from the brain to build better AI systems is precisely the gap that I think we wanted to fill with this conference. And which is arguably still a gap.  </p>



<p>Doris    00:00:53    I mean, I, I feel like this, this neuro AI is as fundamental as, you know, physics or chemistry. It’s, you know, the study of intelligence, perception, these, you know, there there’s certainly like fads, I mean, in, in how you analyze data using neural networks and so on, that’s all true. But, um, yeah, the fundamental quest to understand intelligence, I don’t see how that can be called a fad.  </p>



<p>Speaker 4    00:01:20    This is brain inspired  </p>



<p>Paul    00:01:34    Step right up folks and get your tickets. That’s right. Get your tickets to the NAISys conference this year. Hello everyone. It’s Paul. NAISys stands for, from neuroscience to artificially intelligent systems. This is a conference held at Cold Spring Harbor where the goal as you’ll hear Tony Zador say is to bring together the machine learning world and the neuroscience world with a particular focus on how neuroscience can help inform machine learning and artificial intelligence. So I had noticed that the deadline to submit abstracts and to get your tickets to, uh, join the conference, uh, is coming up actually it’s January 21st, right after the release of this podcast. So I thought it would be fun to have the three organizers of the conference this year on the podcast, just to have a broad conversation about their interests and topics related to the conference and neuro AI in general, Doris Tsao is a new voice on the podcast.  </p>



<p>Paul    00:02:35    She runs her lab at UC Berkeley, and she’s interested in how our brains form invariant representations of objects. And you could say she’s, well-known already for her work, studying face patch areas in the cortex, in non-human primates and in humans. Blake Richards is at McGill University. Uh, the last time he was on the podcast, we talked about largely his work, um, figuring out how backpropagation or, uh, something like backpropagation might be implemented in brains. And in this discussion, uh, we talked about his more recent interests. For example, figuring out how multiple streams of representations can be combined to help us generalize better. And Tony Zador is at Cold Spring Harbor. And the last time he was on the podcast, uh, we focused on his paper, a critique of pure learning in which he makes the case that, uh, we need to pay more attention to evolution and the innate, uh, structures and abilities that organisms come into the world with.</p>



<p>Paul    00:03:36    And during our discussion, we revisit, uh, the ideas from Tony’s paper, uh, and use it as a springboard to talk about development and learning and how these processes could be considered one kind of continuous optimization process. And in general, we have kind of a wide ranging discussion about many of the issues that are relevant to the NAISys conference. So I encourage you to go to the NAISys website, which you can find in the show notes at braininspired.co/podcast/125 and consider whether it may be of interest to you to attend this year or another year. Thank you for listening and enjoy. So I thought we would just start, um, not so much by giving an introduction of yourselves, but maybe you, you guys can talk each about something that kept you up last night that you’re thinking about. Scientifically, I know that there are many things that keep people up, uh, these nights, but, uh, in the science realm, something that you’re working on, that’s just at the edge of your knowledge, Tony, would you want to lead us off?  </p>



<p>Tony    00:04:38    Sure. Um, well, so my lab is pretty diverse. So what keeps me up one night, um, isn’t necessarily what keeps me up the second night. I don’t get a lot of sleep at all, but most recently, what, what has been keeping me up is, uh, we’ve been working on applying, uh, some ideas about the genomic bottleneck to reinforcement learning. And we’ve been trying to figure out how we can compress the networks that we use for reinforcement learning by a couple of orders of magnitude and see if that can, uh, give us better generalization, uh, better transfer learning. And so there’s a lot of, a lot of exciting stuff going on there, but, um, that that’s sort of at the, at the edge of what we can do with that, it seems to be working, but there are some, some, some things that aren’t quite quite quite there yet. So we’re pretty excited about  </p>



<p>Paul    00:05:37    We’re going to come back to that as well. Uh, I have questions about that. All right, great. Uh, Doris, do you want to, uh, what, what kept you up last night?  </p>



<p>Doris    00:05:46    Yeah, I don’t know about last night, but the question that, um, I’ve been obsessed about for a long time now is how the brain solves segmentation. You know, we see our vision is really based on objects and there must be some way that the brain manages to bind all the parts of an object together and track those parts as they move around. And, um, whatever the code is, it should be fundamentally different from anything that we understand right now, because it has to be a dynamic code, right. The object comes twice as big. Like that code has to expand. Um, sometimes, you know, in computer vision, people like use colors and what is the analog of like this color label in the brain? So I would, you know, I don’t give everything to know the answer and that’s one of the big problems we’re working on right now.  </p>



<p>Paul    00:06:35    So you’re sort of famous for, uh, faces, right? Face, uh, patch areas, uh, in, in brains. But that wasn’t your original interest. Your original interest was, was objects. And now you’ve returned to that.  </p>



<p>Doris    00:06:48    Oh yeah. You know, I mean, I got into faces. I joke that it was like this, you know, 20 year detour and now, now I’m going back to what, what I want to do. Um, originally, right. My, my first experiment, when I was a grad student, I set up Machia for Mariah and I showed monkeys pictures of stereograms. Cause I wanted to understand how they represent 3d objects. And then, you know, I read the paper from Nancy Kanwisher about faces and it seemed like a fun project, but might not work to, to show monkeys faces. And that sort of took on its own life,  </p>



<p>Paul    00:07:17    Uh, 20 year diversion then. Huh? That’s I guess that’s how science work.  </p>



<p>Doris    00:07:22    I, I hope that they’ll come together. You know, what we’re discovering is face patches and it’s not, it’s not really about faces for me. That’s not, I don’t, I could care less about faces. There’s like one part of the brain that I could have leash and it would probably be my face there. So it wouldn’t, you know, be so shy. Um, it’s, you know, the face patch system is beautiful. Um, we, we call it the turtle’s underbelly, right? It lets us get at the mechanisms for how the brain represents high level objects. And it gets us, uh, an experimental handle to all kinds of questions related to high-level object representation, including, um, one of the questions I assume we’re going to be talking about like unsupervised learning, how does the brain learn to recognize a face? Um, just from a few observations. And I think that’s also going to connect to this tracking and segmentation problem I talked about. So, um, yeah, the face patch system is a lot more than just about how, how the brain represents faces.  </p>



<p>Paul    00:08:16    Is it a solved issue? I mean, there was controversy right over whether this particular brain area, uh, speaking of Nancy Kanwisher’s work, uh, really is, um, representing faces. Is that solved? Is that, uh, are we done with that?  </p>



<p>Doris    00:08:32    Uh, no, it’s not solved. I think we do have a lot more insight into it. And one of the insights has actually come from deep networks, right, that came on the scene, I don’t know, five or 10 years ago. So for the longest time Nancy’s lab and others had discovered these little islands of cortex selective for faces, bodies and colored objects and other things. And we have replicated this in monkeys. Um, and it was a total mystery. If there’s any principles governing how all of these regions are organized. Right. And there was also islands of surrounding cortex that no one had any idea what they’re really doing. And so it was this kind of, yeah, it was a big question mark. And then just, you know, some sense, maybe it’s, there is no principle at all, right? This is what you get. And you get some islands of cortex that represent things that are understandable and other islands that don’t, and then deep networks, um, came on the scene and, and my postdoc, Pinglei Bao — I’m not going to go through the whole story, but he basically, he did a very simple analysis.  </p>



<p>Doris    00:09:33    He passed a large number of objects through AlexNet, um, and just did principal components analysis on activations in layer fc6. And then you can just look at the first two principal components and they span an object space. And the amazing thing is that if you look at what’s in those different quadrants of that object space, one quadrant turns out to be faces. Another quadrant turns out to be things that look like bodies. And so, you know, something clicked and I was like, whoa, what if all of IT cortex is actually laid out according to these two axes of object space that you discovered with the deep network. And that made a new prediction about a new network that we should find, and it turned out to exist. So, um, to a first approximation, it seems like IT cortex is actually laid out like an object space and face patches are one quadrant of that. So, so it starting to make sense, but I think these patches are also, they also are specialized for faces in a very strong way, right? If you just invert the contrast of a face, the cell’s response goes way down, and a lot of those things can’t be explained by just projection out to this generic object space. So it’s still an open question, but we have a lot more insight now,  </p>



<p>Paul    00:10:45    Blake, you, uh, have you figured out, were you up last night, thinking about how face patches get learned in brains?  </p>



<p>Blake    00:10:52    Um, not specifically, but the thing that’s been keeping me up is related to some of this stuff. Um, so, uh, one of my big worries right now is the question of how to develop artificial intelligence that can engage in what we call systematic generalization. So that is not just generalization to unseen data points or even data points that might come from a different distribution, but specifically generalization that obeys some systematicity or some rules as it were. And humans are pretty good at this, right? So you can look at some puzzle, like I can give you some kind of shape based analogy where say, I show you a square, a triangle, and a square, and then I show you a circle, a diamond, and you have to fill in the last one and you’ll immediately kind of detect the rule above you say, okay, it goes one shape, two shape back to the first shape.  </p>



<p>Blake    00:11:51    So then you apply it again immediately. You can get the answer, you don’t have to see any data points. It just, the rule makes sense to you right away. And this is actually surprisingly hard to get in vanilla artificial neural networks. They don’t show this kind of systematic generalization and the old school answer to what you needed for that was symbol systems. And that’s still the answer some people give, but for a variety of reasons, which we can discuss, if, if we wish I’ve come to the conclusion that I think that systematic generalization doesn’t depend on the existence of a symbol system or anything like that, it just depends on the existence of separate representations for static object features in the environment. And on the other hand relations between objects, be it dynamic movement-based relations, or just spatial relations or any other kind of relation.  </p>



<p>Blake    00:12:50    You need this, this distinction between the objects you wreck, the objects that you represent and the relationships between them and those have to be represented by different systems. And if you have these separated, um, representations, there’s some data coming out of a few labs and some preliminary stuff from my lab as well, showing that then if you have these separate representations, you do get systematic generalization. And so then the interesting question for me is how could those separate representations possibly emerge? And this is where we publish a recent study showing that, um, if you optimize a deep neural network that has two different anatomically segregated streams, and you optimize it with an unsupervised loss to do prediction, you’ll actually get kind of segregated representations for static object features and movement or relation features. And so this is, um, I, I kind of broader interests now for me, that ties back into the unsupervised learning question, because basically I’m starting to think more and more that maybe the way we get to systematic generalization in the brain is by having systems that through evolution or learning in the lifetime have been optimized in such a way that you get separate representations for the relationships between objects and the objects themselves.  </p>



<p>Blake    00:14:13    And I think that once you have those separate representations, now you can get systematic generalization. And the reason is actually pretty simple it’s because systematic generalization depends on you having a sense of there being relations that can be applied to anything no matter what the object is. And so once you have those separated representations for your relations, that becomes possible,  </p>



<p>Paul    00:14:37    Uh, is hierarchy involved here or because it’s the way you described it, it sounds like a single level, right? So two representations, and then there’s some generalization, but is there a hierarchical structure that you’re thinking about as well?  </p>



<p>Blake    00:14:49    I mean, you definitely need hierarchy if you’re dealing with any kind of complex high dimensional input in principle, I think this same, uh, rule that you need separate representations for your relations and your objects in order to get systematic, generalization could even apply in situations where you already have a simplified representations that don’t require any additional hierarchy to get you what you need, but in 99% of the tasks that we would ever want an AI to do. And certainly for everything the brain does, yes, you need hierarchy because you don’t, you don’t care about say pixel over what relationships, right? Like you don’t care really about what this retinal ganglion cell and this retinal ganglion cells relationship are to each other. What you care about is the relationship of say, you know, where is my face relative to this other person’s face or something like that? Like, these are the sorts of high-level relationships that you care about. And so that requires hierarchy.  </p>



<p>Paul    00:15:46    So I was just reading about induction and deduction and abduction and how humans are so great because we are great, uh, abductors, right? We, we perform abductive inference. Is this related at all to that, to that? Uh, forgive me for the naive question.  </p>



<p>Blake    00:16:03    No, I mean, it’s definitely related to this stuff because what you could say to some extent is that standard deep neural networks are really good at induction. Um, and you know, I think there’s a lot of evidence for that at this point in time. And so both deductive and inductive reasoning are arguably still missing. And indeed, when we talk about systematic generalization, that is precisely related to, to these questions.  </p>



<p>Paul    00:16:32    So, uh, Tony, I know that you hate learning. Uh, the last time you were on the podcast, we talked about your paper, a critique of pure learning personally, right? Yeah. You seem incapable. No, that, so let’s talk about the, uh, the conference that’s, that’s coming up, actually the deadline to apply and submit abstracts, right. Is just, uh, January 21st, I believe. And so this podcast, yeah, from neuroscience to artificially intelligent systems, NAISys.  </p>



<p>Paul    00:17:08    So it’s kind of interesting because, uh, I mentioned Tony’s paper because, uh, it is, um, in some sense, the antithesis of learning, that’s not true obviously, but, um, so you, but the rage these days is using these learning systems, artificial neural networks, deep learning networks. And Doris has already mentioned, uh, her work with the unsupervised learning and Blake, uh, just mentioned, uh, the same first of all, what’s the conference and, uh, what’s the goal of the conference. And then how did, uh, someone who, uh, is so anti deep learning networks, uh, come to be one of the, uh, organizers and are you the dissenting voice among, uh, the attendees?  </p>



<p>Tony    00:17:51    Um, well, I’ll, I’ll start by saying what’s the conferences, the goal of the conferences to, in some sense, bring machine learning and computational neuroscience back together again. So a lot of the, uh, major insight in deep learning and artificial intelligence came from neuroscience. In fact, you could, you could basically say that almost all of them, all of the major advances in artificial intelligence came from, uh, looking at neuroscience. So the very idea of formulating the, the question of artificial intelligence, uh, uh, the interactions between collections of simple units, which we might be tempted to call neurons suggests its deep roots, right? And in fact, interestingly, even the von Neumann architecture, which is in some sense, the opposite of, um, sort of neural network type architectures, even that architecture was an attempt by von Neumann, an explicit attempt by him to model certain aspect or at least capture certain essential features of how, um, the nervous system works.  </p>



<p>Tony    00:19:08    If you go back to the technical report from, I think around 47 or something on the first von Neumann computer, he devotes an entire chapter to comparing how the architecture that they propose relates to that, of, um, the brain. Um, and so, you know, convolutional neural networks, um, and the, the ideas of reinforcement learning, um, all of these come from tapping into neuroscience, but, and in fact, in the early days, uh, NeurIPS, which was back then called NIPS, uh, was a meeting that drew together, both people from, uh, machine learning and neural networks and people in computational neuroscience. In fact, they were the same people. I mean, that was the, that was the meeting that, that was like my go-to meeting when I was a graduate student. It was the only place, the only sort of substantial meeting where you could present computational neuroscience. Um, but by the, by the nineties, by the mid nineties, those two fields had sort of diverged to the point where it wasn’t really sort of useful to have them as one meeting.  </p>



<p>Tony    00:20:20    And nowadays, I think most many, at least many people who work in, in, uh, artificial intelligence has sort of lost sight of the fact that any knowledge from neuroscience was perhaps anything but, if you like, uh, an inspiration or an existence proof, so, you know, to, to hear a lot of modern AI people talk, the, the role of neuroscience in AI is comparable to the, let’s say, um, role of birds in, um, aeronautics engineering, you know, yes. In, you know, in the beginning, man looked up at flying birds and said, if only we could fly too, but that’s where the connection stops. But, but of course that that’s not really true. So the goal of NAISys is to, to bring these two communities back together and have sort of, uh, get a conversation going again so that, you know, in the event that the current technologies or current approaches sort of asymptote at some point, which I, you know, incredible though these advances are, I think, uh, we still will need new ideas will sort of provide the foundation for those new ideas in this, in this meeting.  </p>



<p>Blake    00:21:38    If I, if I can add to that too. I think one of the interesting things about the way that it’s evolved in recent years is that, um, there has been a lot of interest in the computational neuroscience community in bringing machine learning and AI back on board to, to kind of do our explorations of the brain, but the other direction has yet to be fully recouped. And so that direction of taking inspiration from the brain to build better AI systems is precisely the gap that I think we wanted to fill with this conference. And, and which is arguably still a gap because, you know, if you go to Cosyne or whatever, you see a lot of deep neural networks, a lot of AI stuff, but they’re all addressed at answering questions about the brain. Whereas at NeurIPS, I would say though, there is a growing neuroscience contingent, it’s still a very small part of it. And it’s by no means the mainstream of the conference  </p>



<p>Paul    00:22:40    Doris, do you agree with, with all of that? So you, um, you know, you were just talking about using unsupervised learning models to inform the neuroscience, which like Blake was just saying is the general trend of the direction of the arrow these days. But, you know, just from the title of the conference from neuroscience to artificial intelligence systems, uh, you know, speaks to the, the arrow that Blake was talking about. Do you agree that, um, that there, well, first of all, there’s lots of things. So what Tony was talking about, uh, the original inspiration, you know, trying to bring that back, do you agree that it went away that it’s, that, uh, the AI community, uh, doesn’t appreciate neuroscience and then also in your own work, um, you seem to be, you know, uh, going the normal way, the modern, normal way from AI to neuroscience. Uh, do you have aspirations to go to reverse that arrow? Sorry. That’s like seven questions.  </p>



<p>Doris    00:23:35    That’s a lot of questions. Yeah. So I first, I should say, I’ve, I’ve never attended this NAISys conference before, so I’m super excited. Um, I’m not totally sure what to expect, except that I’ll meet some incredibly smart people thinking about this question of how brains inspire machines and vice versa, how machines can inform our understanding of brains. I can’t, I don’t, I don’t know that people in AI haven’t been thinking about the brain the last 10 years. I think some of them have been deeply interested in the brain throughout. Right. I think Jeff Hinton has always seen himself first as someone who his goal is to understand the brain  </p>



<p>Blake    00:24:10    A hundred percent. And let me say there, there has always been the remaining core community in the AI world that believes in the need for taking inspiration from neuroscience. Huh?  </p>



<p>Tony    00:24:26    I think actually some of the most influential people, precisely the most influential people are the ones who do keep paying attention to neuroscience. I mean, clearly Yann LeCun, clearly Yoshua Bengio clearly cares, I think. Um, you know, so, so th th the people who have made many of the major, uh, advances actually were paying attention, I think that what is lost is for the younger generation, I mean, uh, sort of modern AI has become such a large field on its own that it sort of feels like it’s self-contained. I think that that’s really the, the issue it’s, it’s almost as if one were to, um, you know, try to try to try to, um, make fundamental advances let’s say, in electrical engineering without quite understanding, um, the underlying physics.  </p>



<p>Blake    00:25:29    Yeah, I can. I also, I want to add to that, cause I think there’s a funny dynamic that has come about because of the fact that as Tony said, the most influential people are, are the ones who still fundamentally, both seek and believe in the need for inspiration from the brain. And that is that, um, there’s a large contingent, I feel like of AI researchers who see themselves almost as like rebels or something like that for articulating the idea that we never need to look at brains. And this is the sort of like the cool thing to say as it were for some people, um, precisely because they, they, they see someone like Jeff Hinton or Yoshua, Bengio say, oh, brains are critical inspiration for AI. And they’re like, no, no, no, that, you know, I’m going to, I’m going to show that’s nonsense. These old guys don’t know what they’re talking about.  </p>



<p>Blake    00:26:21    And so I have many interactions with young researchers and on some level, their skepticism, I think is good. It’s healthy to be skeptical of what the older generation tells you. But it’s always funny for me when I have conversations with some really young people in the tech world. And they say to me, wow, you know, none of this really has anything to do with brains. It’s all just matrix multiplication and stuff. And meanwhile, you know, I, a part of me wants to sit down and say, well, listen up Sonny, Jim let’s, let’s do a history lesson here and go through the entire process with them. So what’s funny is that I think that many AI people have left the neuroscience stuff to the side. And some of them see that as a sort of like bold rebellion against the old guard.  </p>



<p>Doris    00:27:06    Oh, I was just going to say Tony, I mean, this also relates very much to your, your, um, famous essay, right? I mean, you know, we shouldn’t ignore these hundreds of millions of years of evolution. Like the brain has figured out so much structure and we should, you know, get a huge leap. We can, we can figure out what those structures are. What are those fundamental structures that enable intelligence? Like, it just seems ridiculous to, to ignore that, like why.  </p>



<p>Tony    00:27:31    Yeah, absolutely. I mean, that’s what, that’s what, that’s what really keeps me up at night. It’s the idea that, you know, it’s like if we want to achieve faster than light travel and some aliens plunk down a spaceship capable of faster than light travel, right. We would sit there trying to reverse engineer that spaceship to figure it out. Right. So we have that. We are surrounded by creatures that have solved the problems that we’re trying to solve, not just humans, but animals, simple animals who are outperforming us, worms, flies, bees, spiders, uh, my dog, rats, they’re all outperforming many, many, many things that we wish we only wish we could, we could, um, build machines to do. And some of them are so simple and we still don’t understand them. It’s embarrassing. Right. We there’s this, um, you know, this great cartoon of a bunch of, um, of, uh, Lego pieces, right. And it’s just an empty box and like, ah, okay, we, we have everything. We just need to figure out how to put them together. We know so much, and yet we don’t quite know how to put them together and in the appropriate, uh, meaningful way. Um, so that’s what, that’s what, you know, it’s just such an obvious source of not just inspiration, but specific guidance.  </p>



<p>Tony    00:29:07    When I was in, in, um, in graduate school actually, like I think it was one of the first, it was a summer course at MBL that I was at, you know, people were staying up late and drinking and you know what one very senior, respected neuroscientist was, um, talking about how, you know, we were on the brink of understanding how the brain works and he started prophesying the coming of the, uh — he basically started prophesying the coming of the Messiah of neuroscience who would, you know, sort of reveal the truth to us. And, you know, maybe he had, uh, he was, he was, uh, he had a little bit too much alcohol onboard. And so he was, uh, you know, personifying it. But I think many of us feel like we’re just on the brink, like if only somebody could explain to us what we’re missing. Right. And some of us, maybe even what I hope to discover that missing thing, it’s just so frustrating that we, we know so much. And we don’t quite know what we don’t what, you know, that missing piece.  </p>



<p>Doris    00:30:18    Yeah. I mean, yeah. Blake was talking about this factorization between relations and, um, you know, w what the object properties. Right. And so that reminded me, I, him at this, how you think about it, Blake, but, you know, when you try to generate invariant representation, you kind of, you know, on the one hand you’re saying that this thing that’s transformed is the same. So you extrapolate those various features at the same time. You want to know what that transformation is right now. Did it rotate? Did it expand? And, um, so I, I think that I, you know, I think a lot of the structure that somehow built in through these, you know, this genome that’s supposed to run the wire down here is tick struck the 3d world, right. That the geometric aspects, like how things transform, like being able to deal with that. Cause once you have that, then you can, you know, do unsupervised learning all of that. Right. Because you can track this object and you have like suddenly millions, billions of training samples for free. So I that’s, my, my hunch is like a lot of it, like if we can understand that, um, it will go a long way. So I really resonate with Blake there. And, um, yeah,  </p>



<p>Blake    00:31:19    One thing that I’d say about that though, and I think this gets at, um, where Tony’s essay has influenced my thinking a bit more, and I think, um, is really an important thing to remember when we’re talking about inspiring AI with the brain is precisely as the Doris kind of gestured up there. These, these systems, if we think about the visual system for a moment, you know, it has surely been optimized over the course of evolution, uh, to engage in exactly that kind of invariant representation for the object. And then you have your representations of the spatial relations and stuff. So the object can rotate and it doesn’t look different to you. And this is all built into our genomes, but, you know, cause I, I suspect that there’s some of that in animals, the instant they’re born. But then on top of that, there’s some, uh, a whole layer of supervised unsupervised learning, sorry, unsupervised learning throughout the early life.  </p>



<p>Blake    00:32:17    That takes those underlying inductive biases that help us to segregate out kind of constant objects and relations between objects and stuff. And then can do a lot more learning on top of that so that we can learn really particular features of particular objects. And, you know, this is how a cat moves. This is how a ball moves. This is the nature of, you know, uh, playing with, uh, spinning top, et cetera. And, and, and all of those particular relations and properties that hold for the unique objects that evolution couldn’t necessarily have known about in advance our, what we learn through unsupervised learning, but that’s all done on the base of a fundamental, very strong inductive bias to have these in, in variant representations of constant objects and relations between them in a 3d world.  </p>



<p>Paul    00:33:10    Since you mentioned, uh, Tony’s paper, we don’t need to make the whole conversation focused on this. But so I recently had Robin Hiesinger on the podcast. I think it may have been last episode actually, he’s the author of the self-assembling brain. And, um, the way that it’s sort of pitted generally is there’s evolution, there’s innateness, right? So we come into the world and there’s the structure, um, which is encoded in the DNA somehow. Right. And then there’s learning on top of it. But his argument is that, uh, what we are forgetting, which is an impossibly, uh, complex myriad of, um, information unfolding is what he calls it, is that during, from genes to the connectome, that developmental process is a crucial missing aspect. Um, and he kind of considers it, um, an algorithm from the DNA to the connectome because our DNA can’t specify the entire connectome. Right. But then, uh, then on top of that, there’s learning. So do we need to consider development or can we really just figure out the strut, the right structure, uh, and build that in a structure in the connectome and or in the case of artificial networks in there?  </p>



<p>Tony    00:34:19    Yeah. I think it’s clear that the way you get, I mean, it’s not as clear, it’s that the sort of way that I would just think about it, that the way you get from a genome to any physical structure is via development. And the, the observation that the amount of information in the genome is orders of magnitude lower than the amount of information in the connectome implies that there have to be actually relatively simple rules for going from a genome to a connectome. And those are developmental rules now on top. So some of those rules are, are, um, kind of be like activity dependent and it’s those activity dependent rules probably that over the course of evolution got sort of co-opted and formed the basis for activity dependent learning. In fact, from a neuroscientist point of view, at least from a synaptic neuroscientists point of view, it’s sometimes pretty hard to distinguish mechanisms for development from mechanisms, for learning, you know, uh, LTP long-term potentiation is the leading candidate synaptic mechanism for, uh, learning and memory.  </p>



<p>Tony    00:35:44    But in fact, some of the, uh, earliest results in LTP were in development. So it’s really, there is no sharp distinction from an organism’s point of view, uh, between, uh, mechanisms of development and mechanisms of learning. Some of the very earliest mechanisms of development, um, are, are clearly distinct neurogenesis and things like that probably are, um, you know, and also laying out the, the, the basic wiring diagram of a neural circuit don’t necessarily depend on, um, activity, but for the most part, you know, learning and development, they go hand in hand in biology and, and the distinction between them is kind of artificial.  </p>



<p>Blake    00:36:32    Yep. I would a hundred percent agree with that. And I think that Tony made another really interesting point there, which is that what we call learning is probably a series of other mechanisms related to general developmental properties of the neuro nervous system that got co-opted over the course of evolution and which somehow in mammals and some birds and stuff got linked into specifically, um, things like error reduction mechanisms. And that was what then transitioned us towards what we might call learning and the proper sense of it,  </p>



<p>Paul    00:37:10    The proper sense. So there’s a bias right there, right.  </p>



<p>Blake    00:37:13    Well, okay. So here’s, here’s, I guess what I would say about, you know, where I distinguish learning from other activity dependent properties, and this goes back to work. I did in my PhD, uh, where I was doing a lot of work on synaptic, plasticity and tadpoles. And whenever we would show changes in the tadpoles visual system, as a result of, you know, activity dependent processes, people would always ask, well, how do we know that that’s, uh, not just, you know, some program that the genome has built in it, but which needs some activity to unroll. And the answer was always, well, we look for specifically stimulus instructed changes. So if you can show that the nature of the changes depend not just on, you know, there being activity, but on specifically the stimulus, you show the animal. And so if you show different stimuli, you get different results in terms of how the brain develops. Then you’ve got something that’s, you know, arguably learning, cause it’s actually reflecting the animal’s experiences rather than it being simply a gate that opens to allow for the developmental program to enroll  </p>



<p>Paul    00:38:23    Blake. I was going to ask you about this anyway. So I’m going off of what you just said. I was curious, you know, in Tony, um, you brought up LTP and synaptic mechanisms of learning. What, what your take is on the new, uh, dynamical, uh, fad, where you’re looking at manifolds changing and, um, neural activity, uh, progressing through a manifold, low dimensional space, and that learning can take place in the dynamics of the network, um, that it’s not all plasticity based. Are you on board with, with this story?  </p>



<p>Blake    00:38:56    And I’m certainly on board with it. And I mean, I think we’ve known that for a long time, because there are certain types of tasks that you don’t need long-term potentiation for. And so therefore it has to be something other than synaptic plasticity on some level, right. Um, and the dynamics is a reasonable place to start. I think that, uh, the, you know, my favorite demonstration of that was actually a paper from Jane Wang and Matt Botvinick where they do meta-learning in a neural network and a deep neural network. But the meta-learning quote unquote is interesting because the inner loop was actually just dynamics of activity. And they show that if you, if you train the network such that the dynamics of active activity represent your sort of plasticity in the inner loop, and then you’ve got your outer loop where you actually change your synaptic connections, you can end up recapitulating a lot of really fascinating experimental evidence related to how animals and people use their prefrontal cortex to solve a whole host of problems. Um, so that’s just one example paper, and there’s been a few around for awhile. So I think that trend is, is, you know, gaining steam precisely because on some level there’s, there’s something really, really  </p>



<p>Paul    00:40:06    Well. I had that. I was up, uh, too late the other night, and I had the thought that maybe the quote unquote learning, you talked about what proper learning that the learning that’s taking place in the dynamics, uh, may not be considered learning per se, but just movement among an inductive bias that’s already built in. Right. Uh, and that inductive bias is built in through the synaptic connection weights, right? So it’s, it’s like we can, we have these capabilities of, um, moving along the dynamical, uh, manifold landscape to throw some jargon out there, but we can only move into spaces that, um, already exists. It’s not like true, quote-unquote learning that’s happening because we’re already set up, we already have those available spaces to, um, to visit  </p>



<p>Tony    00:40:52    Well, what, what makes that less learning-like than any other kind of learning? I think pretty much we, we can only learn things that we can learn, right? Like, you know, a quintessential example of things that we can learn. Uh, I think we can learn is, is language. And yet, you know, I believe that we have a circuit that predispose us to learn language. Now, you know, the details of the specific language we learn, you know, it depend on the language that we’re exposed to. And it’s hard to articulate exactly what it is that is common among all languages, but it’s still mean, I think it’s pretty clear. We have some, some, if you like innate circuitry that enables us to acquire a language very quickly, and there are some slots there, some, some free parameters that get filled pretty quickly over the course of the first few years of life that allow us to acquire sounds and words and, and basic syntax and grammar. Yup.  </p>



<p>Doris    00:41:59    Yeah. Also I would say I’m from the experimental side now there’s some amazing experiments being done with BMI’s to see the capacity for the brain to learn. I think it’s like really, for me, at least it was kind of shocking that you can, you know, set up a BMI. So, um, like a mouse can learn, um, to control a cursor based on the activity of pretty much like any neuron, you know, an arbitrary chosen set of neurons, right. In, in, in some arbitrary piece of brain, like they can control a cursor by controlling that activity like that they could learn to do. That was, it was pretty shocking to me. And I, you know, it sort of goes against this idea that you’re only able to learn very specific,  </p>



<p>Paul    00:42:37    But in that case, so just continuing on my late night thought experiment, uh, in that case, couldn’t you argue that the mouse already had the ability to make those movements, right? So it can exp it can’t explore some completely novel, um, way of, of mapping. So in my thought experiment, it would require like actual changes in the synaptic structure, right. In, in the, um, connections between the units, because you could say that, well, the mouse already had the ability to, um, visit those, those, uh, spaces and already had visited those spaces probably right throughout time. So it, it’s not that challenging to remap the population, um, dynamics to that space. Does that make sense? I don’t know why I’m arguing about this. It’s this is about you and not me about you guys, not me. So I’m sorry  </p>



<p>Doris    00:43:27    To me, it’s still pretty stunning. Like you choose like an arbitrary set of neurons. Like they, I, who knows what they’re actually coding and you can just get the mouse to use those, the activity of those neurons to control this thing. It suggests something about incredible flexibility. Right? You mentioned remapping it, there has to be some kind of remapping and whatever the mechanism is, this has to be incredibly flexible. And it gets us this question of how do you, how, how does the brain do this dynamic routing, right? Like, like I tell you, you know, um, Paul have you, if you see me at NAISys, like give me a hug. And if you remember that, like you’ve suddenly made a connection from, you know, your face cell representing me to, to your, you know, cells representing hugging. And it’s a dynamic connection. That connection has never happened before. How on earth do you do that?  </p>



<p>Paul    00:44:15    But I, I have not hugged many people Doris, but I have hugged people. So that’s within the realm of my current capabilities, right?  </p>



<p>Doris    00:44:21    Uh, to wire it’s specific for me, it’s like the magic part.  </p>



<p>Blake    00:44:26    And I, I just want to say something that I think gets at what you’re trying to get at, Paul, which is, and this ties back to Tony’s first answer to you as well. All learning systems are constrained on what they can learn. There, there is no such thing as a learning system, that’s not constrained on what it can learn. And in fact, this has been proven mathematically with the no free lunch theorems. If a learning system truly has no prior on what it can learn, it basically just learns everything poorly. So to get good learning, you necessarily have to constrain your system to learn well in certain areas. And in this way, um, you know, if we show that, say brains, have certain restrictions placed on the sorts of things they can learn, uh, that’s unsurprising. That would almost, it would be more surprising if that didn’t exist.  </p>



<p>Blake    00:45:15    And that’s where I agree with Doris’s point, which is that sometimes it’s shocking, the things that brains apparently can learn. In my opinion, when it seems like it shouldn’t necessarily be something that’s learnable that, like, why would we not be constrained to, to learn that, you know, I suppose I think different species have different degrees of this. And so for me, I think humans are remarkably adept at learning a surprisingly large number of arbitrary things. And, but that doesn’t mean that we’re not constrained. We’re very much so still constrained. It’s just that it’s surprising how arbitrary it can be. Yeah.  </p>



<p>Tony    00:45:54    Just to circle back to the question that you asked awhile ago about, do I hate learning? How much do I hate learning?  </p>



<p>Tony    00:46:06    I obviously I personally hate learning things, but, but I think that the point that I was trying to make in that essay was not that learning isn’t a thing that exists, but that a great deal of what non neuroscientists sometimes imagine depends on learning. Probably doesn’t depend on learning by an individual over the course of his or her lifetime, and that we are biased by paying attention to humans who probably learn way more than almost any other animal, probably more than any other animal, but even we don’t learn as much as we think we do, but animals, most animals don’t actually require a great deal of learning to, to function properly. So they’re capable of learning, but if you look at most insects, they can’t afford to spend their first couple months figuring out how the world works, right. They come out of whatever it is that insects come out of.  </p>



<p>Tony    00:47:14    And they’re, they’re pretty much ready to roll, right. Or fly or bite or crawl or do whatever they’re going to do. Yeah. I mean, you know, I have colleagues who study learning in Drosophila and so, you know, flies are capable of learning, uh, and that certainly is adaptive, but many of the things that we’re impressed at that, um, insects and frankly, even mammals do probably doesn’t require a great deal of learning. In fact, probably maybe just a bit of fine tuning to the environment. So, you know, you’re watching a squirrel jumping from tree to tree that squirrel didn’t like figure out de novo how to jump from tree to tree, like all squirrels learned to do it pretty well.  </p>



<p>Blake    00:47:57    And I think I just want to note something, cause I think there’s this misperception that there’s a big divide on this question. Tony has actually convinced me of this point. And, and I really don’t think that it’s incorrect to say that for the vast majority of species, a lot of the learning that has occurred, quote unquote, was actually optimization over the course of evolution. I think that, um, what is maybe sometimes misunderstood about this argument though, and, and Tony or Doris, you can disagree with me if, if, if you do on this point, is that, that doesn’t mean that then for AI, the message is hardwired human engineered features because the, the problem, the, the mental jump that people are making there is they’re saying, okay, animals have a lot of innate machinery. Therefore we should give AI and AI machinery, but they’re forgetting the animals.  </p>



<p>Blake    00:48:57    And AI machinery was delivered courtesy of an optimization routine rather than a human engineer. And this is the problem because human engineers suck at delivering the kind of things that you need for AI. That’s what we discovered over the course of 50 years of failed AI research. So, you know, although everything that is Tony’s saying is true, animals have all this innate machinery and, you know, a squirrel probably doesn’t they tweak whatever existing programming is there in order to learn how to jump from tree to tree, that doesn’t then mean that the solution to AI is for us to sit down and try and be like, Hmm. Okay. Let’s think.  </p>



<p>Tony    00:49:36    Absolutely. No, I think, I think that’s absolutely right. Like we have this innate machinery and if we’re going to try to engineer it, the solution isn’t. So, so far we, we were, we were given two choices, right? One choice is to hand engineer. Those features either by using your imagination or possibly by looking at the engineered features from animals that’s choice one, and choice two is to learn them de Novo, each time you train the system and I’m arguing there’s, uh, a third, uh, route, which is you, you lay a foundation of the sort of useful prior, right. And maybe you get them through, uh, an optimization algorithm. And frankly, you know, just because evolution got them through an evolutionary algorithm doesn’t mean that that’s exactly the algorithm we need to use. So in fact, you know, evolution is a lousy algorithm, right? Because it doesn’t use a gradient evolution works because it operated over.  </p>



<p>Tony    00:50:40    I want to try to do a back of the envelope calculation on this, like ten to the, ten to the 30 individuals have contributed to, um, our, our genome, even with really fast GPU. It’s going to be a while before we can sort of simulate 10 to the 30, uh, organisms and use the outcome of that as the basis of, um, of, of our, of our system. So, no, I mean, the, the insight that we have was that gradients are really useful for finding your way around the high dimensional space, right? So if we’re going to engineer, if we’re going to recapitulate evolution, right, we’re probably going to have to do it using gradients. Then the idea is that we shouldn’t have to redo that each and every time we train the network, we should sort of figure out some kind of collection of foundational structures, right?  </p>



<p>Tony    00:51:38    Each time we train a network, uh, usually, and, you know, there’s been some recent work on, um, you know, not starting from scratch each time, especially with, uh, language networks, because there are basically, we have no choice, right? Cause you know, training, one of these causes the lights to dim in Boston for a couple of days or requires that much energy and compute. So it, it seems like at some point you can’t keep retraining from scratch each time. But I think that, that the lesson there is far more general and we have to sort of figure out how to reuse the training that we’ve done over and over again in, in a, in a useful way. So, you know, when, when I was a kid, we used to, um, uh, stump each other by asking, do you walk to school or carry your lunch? And it’s a false dichotomy, right? That the choice between learning and, um, exploiting innate structures is a false dichotomy. The answer is we should do both.  </p>



<p>Paul    00:52:41    Do you think though, we’re anywhere near understanding the capacity? So thinking about the high dimensional structure, right. Um, of, you know, 86 billion neurons, but do you think that we are near, uh, anywhere near appreciating, um, the actual heavy, heavy lifting that evolution has done to, uh, create that particular high dimensional space, right? We’re these amazing general, uh, learning things. And it’s amazing the different types of things that we can learn and recombine, but on the other hand, constraint like Blake was saying is super important. And do we, do we appreciate that high dimensional structure enough? Or do we think, okay, it’s so high dimensional, it can just do anything. I think most people  </p>



<p>Blake    00:53:25    Recognize the importance of the high dimensional structures that have been optimized by evolution for the unique properties of human thought. And certainly anyone who’s tried optimizing neural networks for any lengthy period of time will appreciate just how amazing the product that evolution has produced is because you can get a lot of really cool, funky, amazing behaviors with gradient descent, but getting the unique mix that you see in animals in general, not just people is turning out to be remarkably difficult. And so, um, I think anyone who, who has spent some time with them and with these, these optimization procedures will respect evolution’s contribution quite a bit. Yeah.  </p>



<p>Doris    00:54:13    Yeah. And there’s, yeah, you guys have talked about how you can use evolution to build the most powerful machines. Um, you know, as a neuroscientist, my interest is really to understand the brain and, um, like there’s different ways of understanding, right? There’s, there’s like this fad right now of, you know, um, regressing activity of neurons to units in deep networks. That’s one type of understanding, you know, I think the deep understanding it’s going to require understanding those structures, right. It’s sort of like, um, if you take a simpler example, you know, can you calculate, what is the probability of getting two heads and a tail if you flip a coin three times, right? So you could figure that out by doing a Monte Carlo simulation, you could figure that out by writing out all the outcomes, right. HHT HTT so on, or you could figure that out by actually understanding the structure, the binomial distribution. I think all of us would agree that the last form of understanding is the real understanding. And simply, like, just taking a neural network and regressing and saying it explains whatever percent variance, like that, that’s not totally satisfying. Right.  </p>



<p>Blake    00:55:19    I agree with that. And I just want to say, I think sometimes, um, there is an unfortunate tendency for people to think of the contribution that, uh, machine learning can make to neuroscience as being fully encapsulated by that approach that just regresses neural activity, uh, against deep neural networks. And I think that provides us a bit of understanding as Doris said, but in my mind that is only really effective as a tool for gaining understanding if you’re using it to answer other questions with the neural network. So simply showing that you have a network that you can regress well against the data is itself. Not necessarily that informative. It doesn’t tell you nothing, but it’s not necessarily that informative, but instead, what you want to do is you then want to use those models to, as it were, understand the distribution, and to try to think about the principles by which you can get models that are better fits to brains. Um, and, and it’s only by taking that principled approach and using these models as normative guides that we get to something like real understanding, simply doing the regressing itself is not, I agree with Doris, sufficient for understanding, and it’s also not. And this is ultimately my point, the entirety of the program that, you know, are neural networks and machine learning and neuroscience have to offer neuroscientists.  </p>



<p>Doris    00:56:44    I think Tony’s essay also for me personally, like introduced another dimension of understanding, right? Understanding how the genome encodes these structures that enable learning, like, you know, and it sort of always like festered in the back of my mind. And I heard the statistic that the genome, you know, you can put it on a CD rom or something, and it seemed kind of incredible, but I never like Tony, like really worked out the implications of that. Right. Like you have to specify all of these learning rules in that, in that, um, CD. So that’s, um, yeah, like, like I feel like if we’re going to understand the brain, like we have to understand that question too. You know, like Marr had the famous three levels that this is, I feel like a fourth level. Like how does this computational structure, how do you actually wire it up with the small amount of information?  </p>



<p>Tony    00:57:30    Uh, in some sense, it’s I consider good news because it means that the best, there was one concern that I’ve always had is that the brain is just infinitely complicated. You know, there’s this bag of tricks idea that basically it’s just a collection of kludges. And although there’s clearly some of that going on, right. Clearly a lot of specialized adaptation, um, that you’ll only understand if you really, really dig, dig, dig, dig very deep. The overall description length of the entire circuit is just not that long. And you know, there’s an upper limit, which is the size of the genome, but it’s not being optimally used in some sense. And not all of the genome is used to specify the brain. So, you know, the difference between actually I did another calculation, the difference between a, um, human brain and a macaque brain actually does fit, um, an old school floppy disk. So it’s, you know, of order one megabyte  </p>



<p>Paul    00:58:34    Doesn’t know what you’re talking about.  </p>



<p>Tony    00:58:39    Um, so, uh, you know, it’s, I think it’s good news that the things that make us special as humans, it’s really not that much right now. You know, one megabyte of this stuff could actually have a huge impact, but to figure out what that, uh, to write it down, it turns out probably not to be so hard.  </p>



<p>Paul    00:59:01    Well, this is why I brought up development earlier. And, um, I mean, I’m just biased because of my recent conversation with people like Robin Hiesinger. Um, and well, other people I’ve had on the, on the podcast recently, Kevin Mitchell, for instance, um, that the, their argument is that we’re actually missing, like what is actually specified in the genome. Isn’t the rules for connections. It is the rules for development and that through the developmental process, that algorithm actually changes.  </p>



<p>Tony    00:59:30    Yeah. Yeah. That’s implicit and say, it’s not. So it’s clear that we will not like dig into the genome and suddenly find the part where you can unpack the matrix that reflects the, you know, the connectome. I don’t, you know, not, not the recent movie, but the connectivity matrix among, among neurons, right? Like we’re not going to, ah, there, you know, let’s just do Jesus on this. No, it’s not. Um, it’s not simply a bunch of, of synaptic weights, even, probably in C. elegans where you could actually list all the weights, C. elegans being a worm with 302 neurons and 7,000 synapses. Right. Even though the C. elegans genome is a little smaller than ours, um, that connectivity matrix for the, uh, circuit, the entire brain circuit of the worm C. elegans would fit comfortably into the genome, but that’s still not how it’s done.  </p>



<p>Tony    01:00:30    Right. So developmental rules are, you know, interesting and complicated, but there are rules, right? So at no point would you expect to find a list of connections within, uh, within a genome. So I, I think, I think we’re all in agreement that the way you get from genomes to circuit is via interesting developmental rules. Um, whether that, whether understand, like, I think understanding those rules is fascinating in its own, right? Whether that will be the path to understanding, um, you know, the computational roles of neural circuits. I don’t know. I’m, I am getting increasingly interested in development in the hopes that maybe it will provide insight, but you know, there, there are possibly many ways of figuring out how to make a brain that, that compute or how, how computation in the brain is, is sort of instantiated into circuit.  </p>



<p>Paul    01:01:32    Tony, how has your paper aged? I had you on, I was like two years ago or something. I don’t remember. It was forever ago. Uh, but, um, yeah, what’d you do, would you have written anything differently in the paper at this point, or do you still stand by the original message and  </p>



<p>Tony    01:01:48    It’s sort of laid down the, um, certainly the path that my lab is going to be taking in this, in this domain. So, you know, that was, you know, it was an essay, it was full of, uh, ideas and observations, but not actual work, but for me, like the research program that, uh, is suggested by it is to figure out how to actually compress, uh, an artificial neural network wiring diagram into a quote unquote, a genome. And that when you ask, what am I thinking about that on a day-to-day basis, what I’m thinking about and you know, that that’s the nitty gritty of it and it’s been, um, it’s been a lot of fun. It is a lot of fun, um, to see how we can do that. So, um, but I think, I think there’s, you know, if I were to ask, I’ve been talking to actually Blake about this recently is to think a little bit harder about the role of evolution in all this. I think, um, you know, how to actually fit that in is like, I, I don’t have a clear idea yet, but in terms of a path forward, um, that that’s something that, that I’ve certainly been thinking a lot about.  </p>



<p>Paul    01:03:04    Do we understand evolution itself well, enough to think about those things?  </p>



<p>Blake    01:03:08    I feel like we understand the principles of evolution pretty well. You know, it’s, for me, the biological equivalent to Newtonian mechanics, it is the core insight that allowed us to build all the rest of the conceptual scaffolding and general approach to doing the science. And so, you know, I’m sure there are still tons of things that people are learning about evolutionary processes every day, but the fundamental mechanism is very clear and we can simulate it. We can show that you get all sorts of interesting things that, you know, explain a variety of facets of biological life. And, um, so though there’s more to discover. I think if, if we don’t admit to knowing to understanding something about evolution, then I’m not sure what we do understand  </p>



<p>Tony    01:04:03    I’m with you there.  </p>



<p>Paul    01:04:05    All right. As, as organizers of the NAISys, uh, conference this year, um, I will just put your feet to the fire. Where do you guys think complete speculation? Of course. Where do you think we are on the fad curve of what, uh, what is sometimes called neuro AI that has, well, go ahead,  </p>



<p>Tony    01:04:24    Hardly a fad. I would say that it’s just the opposite. It’s uh, after a neuro AI winter, we are experiencing, um, the, the neuro AI spring. So we’re returning to, uh, you know, we’re returning to our roots. So it’s, I think that it deserves, I think neuroscience deserves to be a part of AI and vice versa, and we’re just hopefully gonna kind of catalyze that, that return. Yeah.  </p>



<p>Blake    01:04:59    I, I agree with that. And I think the only caveat I’d add, and this is why sometimes you can talk about fads and I say this with all due respect to anyone in California listening or here on the call. Um, sometimes the, the fad machine that people are picking up on is not what’s going on in academia or even industrial research, but the fad machine, that is what comes out of Silicon valley, venture capital culture and stuff like that. And there, I think we probably have passed an inflection point. If you just look at the number of searches online for deep learning, it’s down from a few years ago. If you look at the extent to which people are throwing money at anyone who says they have a company that does deep learning that’s down from a few years ago. So there’s some business cycle fad that maybe is slightly on the wane, but I think the longterm business trend, and certainly as Tony articulated, the long-term scientific endeavor is not a fad has never been a fad and will continue to pick up pace as we start to figure more and more out.  </p>



<p>Doris    01:06:06    I completely agree. I mean, I feel like this, this neuro AI is as fundamental as, you know, physics or chemistry. It’s, you know, the study of intelligence perception, all of these. Um, I would think that some of us, you know, that’s what we care about. It’s, um, it’s so fundamental. Like it’s, you know, there there’s certainly like fads, I mean, in, in how you analyze data using neural networks and so on, that’s all true. But, um, yeah, the fundamental quest to understand intelligence, that can’t be called a fad.  </p>



<p>Paul    01:06:36    All right. One last question. I know you guys have to go, uh, for each of you do the problems that you are working on. Do they feel bigger or do they feel smaller than when you began working on them or as, as you continue to work on them?  </p>



<p>Doris    01:06:51    For me, they definitely feel so much bigger. You know, when I first started recording from face cells, the question was like, what, what drives these face cells — was it the eyes, the shape of the face, or size? We’ve pretty much figured that out, you know, the cells are driven by shape and appearance features. Um, so now we’re asking questions, like how does the brain generate a conscious percept of a face or can the brain, how do you imagine a face? Um, how do you, how do you learn a face in an unsupervised way? It’s, um, a whole new realm of questions.  </p>



<p>Tony    01:07:23    Yeah. Going back to when I started graduate school, I had this fear that, um, I wouldn’t, I wouldn’t train up fast enough. Um, and that, by the time I sort of understood enough to do useful work, all the problems would be solved. So that turned out not to be quite, uh, so in that sense, the problems seem, uh, constantly to get bigger. I thought the problem was pretty small when I started. Uh, and, and, you know, I, I thought that it was kind of like training up as a physicist in the late in the mid twenties, right? Like, because there was this sudden moment where, you know, everything got figured out in quantum mechanics. And if you got your degree in, you know, 1929, you’d missed the boat. I figured that that’s how it was going to be, uh, turns out we haven’t, we haven’t quite hit that inflection point. So the problems remain bigger — bigger than when I started graduate school.  </p>



<p>Paul    01:08:26    Cool. Blake, I’d love to hear a dissenting voice here — that all the problems seem tiny now.  </p>



<p>Blake    01:08:33    I’m afraid I can’t give you that kind of dissent. I mean, what I’ll say though is, um, I think that as the field matures, uh, what’s interesting is that you get to the point where though the problems can seem bigger and are bigger. You feel, at least me, I feel a little bit more like I have some of the conceptual tools to tackle them. And so though it seems like there’s more work to do, and the problems are bigger. I don’t feel the same sense of like, well, how the hell are we going to do this at all that I felt maybe like, you know, 15 years ago when I was starting my, uh, graduate school, like that’s, that’s a radically different scenario that way, um, to feel like we actually have some of the conceptual and experimental tools necessary to tackle these problems that do indeed seem bigger to me now.  </p>



<p>Paul    01:09:33    Well, considering that 99.999% of the organisms couldn’t be with us today because of that bastardly evolutionary optimization algorithm. I really appreciate, uh, this little sliver of humanity being with me. Thanks guys for joining me.  </p>



<p>Tony    01:09:48    Thank you very much.  </p>



<p>Paul    01:09:55    Brain Inspired is a production of me and you. I don’t do advertisements. You can support the show through Patreon for a trifling amount and get access to the full versions of all the episodes, plus bonus episodes that focus more on the cultural side but still have science. Go to braininspired.co and find the red Patreon button there. To get in touch with me, email paul@braininspired.co. The music you hear is by The New Year. Find them at thenewyear.net. Thank you for your support. See you next time.  </p>

</div></div>


<p>0:00 – Intro<br />4:16 – Tony Zador<br />5:38 – Doris Tsao<br />10:44 – Blake Richards<br />15:46 – Deductive, inductive, abductive inference<br />16:32 – NAISys<br />33:09 – Evolution, development, learning<br />38:23 – Learning: plasticity vs. dynamical structures<br />54:13 – Different kinds of understanding<br />1:03:05 – Do we understand evolution well enough?<br />1:04:03 – Neuro-AI fad?<br />1:06:26 – Are your problems bigger or smaller now?</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/d66e97d6-3ffd-41f9-922b-38434adcdc55-125-Doris-Tony-Blake-NAISys.mp3" length="68551118"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.







Doris, Tony, and Blake are the organizers for this year’s NAISys conference, From Neuroscience to Artificially Intelligent Systems (NAISys), at Cold Spring Harbor. We discuss the conference itself, some history of the neuroscience and AI interface, their current research interests, and a handful of topics around evolution, innateness, development, learning, and the current and future prospects for using neuroscience to inspire new ideas in artificial intelligence.







From Neuroscience to Artificially Intelligent Systems (NAISys).Doris:@doristsao.Tsao Lab.Unsupervised deep learning identifies semantic disentanglement in single inferotemporal face patch neurons.Tony:@TonyZador.Zador Lab.A Critique of Pure Learning: What Artificial Neural Networks can Learn from Animal Brains.Blake:@tyrell_turing.The Learning in Neural Circuits Lab.The functional specialization of visual cortex emerges from training parallel pathways with self-supervised predictive learning.


Transcript

Tony    00:00:04    The goal of the conference is to bring machine learning and computational neuroscience back together again. A lot of the, uh, major insights in deep learning and artificial intelligence came from neuroscience. In fact, you could, you could basically say that almost all of them.  



Blake    00:00:26    There has been a lot of interest in the computational neuroscience community in bringing machine learning and AI back on board, but the other direction has yet to be fully recouped. So that direction of taking inspiration from the brain to build better AI systems is precisely the gap that I think we wanted to fill with this conference. And which is arguably still a gap.  



Doris    00:00:53    I mean, I, I feel like this, this neuro AI is as fundamental as, you know, physics or chemistry. It’s, you know, the study of intelligence, perception, all of these. You know, there there’s certainly like fads, I mean, in, in how you analyze data using...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:11:05</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 124 Peter Robin Hiesinger: The Self-Assembling Brain]]>
                </title>
                <pubDate>Wed, 05 Jan 2022 16:35:41 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-124-peter-robin-hiesinger-the-self-assembling-brain</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-124-peter-robin-hiesinger-the-self-assembling-brain</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/01/art-124-01.jpg" alt="" class="wp-image-1621" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img width="115" height="149" src="https://braininspired.co/wp-content/uploads/2022/01/robin2015.jpg" alt="" class="wp-image-1622" /></div>



<p>Robin and I discuss many of the ideas in his book <a href="https://www.amazon.com/gp/product/0691181225/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691181225&amp;linkId=7d9b2d30de97530b49d1c6e179db0c3f">The Self-Assembling Brain: How Neural Networks Grow Smarter</a>. The premise is that our DNA encodes an algorithmic growth process that unfolds information via time and energy, resulting in a connected neural network (our brains!) imbued with vast amounts of information from the “start”. This contrasts with modern deep learning networks, which start with minimal initial information in their connectivity, and instead rely almost solely on learning to gain their function. Robin suggests we won’t be able to create anything with close to human-like intelligence unless we build in an algorithmic growth process and an evolutionary selection process to create artificial networks.</p>



<div class="wp-block-image"><img width="200" height="302" src="https://braininspired.co/wp-content/uploads/2022/01/the-self-assembling-brain-book-cover-sm.jpg" alt="" class="wp-image-1624" /></div>



<ul><li><a href="http://www.flygen.org/">Hiesinger Neurogenetics Laboratory</a></li><li>Twitter: <a href="https://twitter.com/HiesingerLab">@HiesingerLab.</a></li><li>Book: <a href="https://www.amazon.com/gp/product/0691181225/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691181225&amp;linkId=7d9b2d30de97530b49d1c6e179db0c3f">The Self-Assembling Brain: How Neural Networks Grow Smarter</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Robin    00:00:03    There’s only one way to do it. You need to let it grow. You need to lead an amount of information unfold that you then need. If you wanted to describe that end point bit by bit would be quite a lot. But if you would only want to describe the information needed to grow, it would be very little, but you can’t predict from the little information what the end point would look like without actually growing it. And the genome is always just there. It’s like a book that’s always there and you always just need to decide what to read in that book and to, to access that book. It’s just enormously complicated. You can’t just open page 255. You literally need a very strange combination of say 30 different proteins that are super unlikely to ever exist at the same time in the cell. But if they do,  </p>



<p>Speaker 2    00:00:58    This is brain inspired.  </p>



<p>Paul    00:01:11    That was the voice of Peter Robin Hiesinger, who recently authored the book The Self-Assembling Brain: How Neural Networks Grow Smarter. Hi everyone. I’m Paul. And today I talk with Robin about a handful of topics in the book. Robin is a neurobiologist or a neurogeneticist, more specifically at Free University of Berlin studying among other things, how DNA and the developmental process encodes the wiring of brains in the fruit fly Drosophila. The central theme of his book is that current artificial intelligence and perhaps current neuroscience theories are leaving out an essential part of what makes us intelligent. And that’s the growth and development of our br...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Robin and I discuss many of the ideas in his book The Self-Assembling Brain: How Neural Networks Grow Smarter. The premise is that our DNA encodes an algorithmic growth process that unfolds information via time and energy, resulting in a connected neural network (our brains!) imbued with vast amounts of information from the “start”. This contrasts with modern deep learning networks, which start with minimal initial information in their connectivity, and instead rely almost solely on learning to gain their function. Robin suggests we won’t be able to create anything with close to human-like intelligence unless we build in an algorithmic growth process and an evolutionary selection process to create artificial networks.







Hiesinger Neurogenetics LaboratoryTwitter: @HiesingerLab.Book: The Self-Assembling Brain: How Neural Networks Grow Smarter


Transcript

Robin    00:00:03    There’s only one way to do it. You need to let it grow. You need to lead an amount of information unfold that you then need. If you wanted to describe that end point bit by bit would be quite a lot. But if you would only want to describe the information needed to grow, it would be very little, but you can’t predict from the little information what the end point would look like without actually growing it. And the genome is always just there. It’s like a book that’s always there and you always just need to decide what to read in that book and to, to access that book. It’s just enormously complicated. You can’t just open page 255. You literally need a very strange combination of say 30 different proteins that are super unlikely to ever exist at the same time in the cell. But if they do,  



Speaker 2    00:00:58    This is brain inspired.  



Paul    00:01:11    That was the voice of Peter Robin Hiesinger, who recently authored the book The Self-Assembling Brain: How Neural Networks Grow Smarter. Hi everyone. I’m Paul. And today I talk with Robin about a handful of topics in the book. Robin is a neurobiologist or a neurogeneticist, more specifically at Free University of Berlin studying among other things, how DNA and the developmental process encodes the wiring of brains in the fruit fly Drosophila. The central theme of his book is that current artificial intelligence and perhaps current neuroscience theories are leaving out an essential part of what makes us intelligent. And that’s the growth and development of our br...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 124 Peter Robin Hiesinger: The Self-Assembling Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2022/01/art-124-01.jpg" alt="" class="wp-image-1621" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img width="115" height="149" src="https://braininspired.co/wp-content/uploads/2022/01/robin2015.jpg" alt="" class="wp-image-1622" /></div>



<p>Robin and I discuss many of the ideas in his book <a href="https://www.amazon.com/gp/product/0691181225/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691181225&amp;linkId=7d9b2d30de97530b49d1c6e179db0c3f">The Self-Assembling Brain: How Neural Networks Grow Smarter</a>. The premise is that our DNA encodes an algorithmic growth process that unfolds information via time and energy, resulting in a connected neural network (our brains!) imbued with vast amounts of information from the “start”. This contrasts with modern deep learning networks, which start with minimal initial information in their connectivity, and instead rely almost solely on learning to gain their function. Robin suggests we won’t be able to create anything with close to human-like intelligence unless we build in an algorithmic growth process and an evolutionary selection process to create artificial networks.</p>



<div class="wp-block-image"><img width="200" height="302" src="https://braininspired.co/wp-content/uploads/2022/01/the-self-assembling-brain-book-cover-sm.jpg" alt="" class="wp-image-1624" /></div>



<ul><li><a href="http://www.flygen.org/">Hiesinger Neurogenetics Laboratory</a></li><li>Twitter: <a href="https://twitter.com/HiesingerLab">@HiesingerLab.</a></li><li>Book: <a href="https://www.amazon.com/gp/product/0691181225/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691181225&amp;linkId=7d9b2d30de97530b49d1c6e179db0c3f">The Self-Assembling Brain: How Neural Networks Grow Smarter</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Robin    00:00:03    There’s only one way to do it. You need to let it grow. You need to lead an amount of information unfold that you then need. If you wanted to describe that end point bit by bit would be quite a lot. But if you would only want to describe the information needed to grow, it would be very little, but you can’t predict from the little information what the end point would look like without actually growing it. And the genome is always just there. It’s like a book that’s always there and you always just need to decide what to read in that book and to, to access that book. It’s just enormously complicated. You can’t just open page 255. You literally need a very strange combination of say 30 different proteins that are super unlikely to ever exist at the same time in the cell. But if they do,  </p>



<p>Speaker 2    00:00:58    This is brain inspired.  </p>



<p>Paul    00:01:11    That was the voice of Peter Robin Hiesinger, who recently authored the book The Self-Assembling Brain: How Neural Networks Grow Smarter. Hi everyone. I’m Paul. And today I talk with Robin about a handful of topics in the book. Robin is a neurobiologist or a neurogeneticist, more specifically at Free University of Berlin studying among other things, how DNA and the developmental process encodes the wiring of brains in the fruit fly Drosophila. The central theme of his book is that current artificial intelligence and perhaps current neuroscience theories are leaving out an essential part of what makes us intelligent. And that’s the growth and development of our brain’s neural networks. And in the case of deep learning and artificial neural networks, Robin suggests we don’t yet appreciate how information which begins at a relatively low level encoded in our DNA, unfolds and increases with time and energy.  </p>



<p>Paul    00:02:10    As our networks are formed — something Robin calls algorithmic growth. And his claim is that it’s essential to include a growth and an evolutionary selection process to get anywhere close to building something with human-like intelligence, uh, if that’s even what we want. So we talk about that central theme and many of the issues that arise out of it. And as you’ll hear, the book is also a history lesson about the parallel yet independent paths of AI and developmental neurobiology. Show notes are at braininspired.co/podcast/124. Thanks to all my Patreon supporters, if you like this sort of thing, and you want access to, uh, all the full episodes and to join our Brain Inspired Discord group, uh, you can do that on the website at braininspired.co. Alright, enjoy. Robin, I enjoyed the book immensely. So we’re going to talk all about it. One of the things that, uh, I was surprised about when I read it, given that I had seen one of your lectures was the amount.  </p>



<p>Paul    00:03:13    So of course you dive into the history of AI because the book is all about how AI might be missing something, right. But what I was surprised about and thoroughly enjoyed as well was the history of, uh, developmental neurobiology and how you weaved that history into, uh, the history with, uh, of AI and how they happened in parallel. And of course, you know, hadn’t really spoken to each other, uh, for many years. So, uh, and you did it in a format that was sort of a storytelling format, so it was very easy to digest. So, uh, congratulations, and thanks for writing the book.  </p>



<p>Robin    00:03:48    Thank you so much for, for having so much good things to say about it. Yeah, it is an unusual angle and it’s because I’m coming from kind of left field, if you will. Right. I’m a neurobiologist. I, um, you know, I dabbled a bit in informatics and I dabbled a bit in philosophy as a student, but, uh, I’m running a research lab, um, teaching undergraduate and graduate students, and we’re publishing papers on how genes encode the information to wire a brain. And, uh, so the, the, the origin of this somewhat unusual approach and of the parallel storytelling of a field of neurobiology and artificial intelligence really kinda originates with me being, I guess, somewhat unhappy with my own field in biology because, oh, you know, we started many years ago in the very successful molecular revolution to publish more and more papers on individual genes, their products and their roles in how the brain develops and how it functions.  </p>



<p>Robin    00:04:55    And it’s led to a data explosion. And, you know, my very good friend and colleague, uh, Bassam Hassan says, uh, you know, we’re, we’re increasing the amount of knowledge at an unbelievable pace, but, uh, or an increase of understanding is not keeping up. And so, so I was wondering whether we’re missing something. I was wondering whether we need some kind of theory, whether we need some kind of information theoretical background, what does it even mean that the genes and code the brain? I mean, that’s obviously a very loaded, strange question. And then of course you can’t avoid in this day and age being bombarded by the news about some kind of allegedly superintelligence systems, you know, seemingly overtaking us next week. So what do they know that that’s when I started to feel like, okay, maybe these people know something that I don’t and I realized, and that’s where the historical part comes in, that the history of AI is unbelievably fascinating.  </p>



<p>Robin    00:05:58    My goodness, these people have been at each other from very different fields for so many decades. And for most of those decades, they really wanted to have nothing to do with the brain or neurobiology at all. And all the successful approaches that we hear about today, like unbelievable success of AI that we have right now is actually all based on what is called artificial neural networks. And they’re closer to the brain than any approach in AI has ever been in history. So that gotta be interesting, right? I mean, if they suddenly take an idea that we have been studying as biologists for so many years and they can do with it, things that we can’t do, and then we feel we can also do some things that they can’t do. Maybe we should talk to each other. So this is how it started.  </p>



<p>Paul    00:06:46    Ah, so I was imagining that as a neurobiologist, you saw the deep learning revolution, the neural networks, and essentially you said, whoa, they have a lot to learn from me, but in fact it was kind of the other way around when you first, uh, poked your nose into it, thinking, what do they know that, that, I don’t know.  </p>



<p>Robin    00:07:04    Absolutely. I mean, the, the, the, the idea that, you know, oh, they should be listening to us, um, dangerously close to ignorance, any kind of approach, right? Because you need to first understand what the other field is doing, and it’s a very rich field. And so this is exactly why I had to go into the history. I mean, when I started, I just want to say, okay, what are they doing right now? What is interesting about this? And I felt like, okay, deep learning is really cool. Recent approaches in reinforcement learning make perfect sense to me. It even has an evolutionary angle. This is even more biological than anything, right? I mean, babies clearly learn with some kind of reinforcement learning. So that makes some sense. So that was all very good. But then you start to wonder, why are they doing certain things? Not at all that, you know, I’m actually spending my life study.  </p>



<p>Robin    00:07:57    And the thing that came to the forefront at that moment in my thinking of course, was wait, I’m a neurogeneticist. I am studying how genes encode a growth process to make a neural network. And that thing’s actually pretty smart before you start learning anything. I mean, we can talk about examples later on from the insect world to, we know when a baby is born, it’s not one network that starts to learn exactly what all pretty much, all deep learning approaches do today. There’s no genome, there’s no growth process that turned on to learn. And that made me then wonder, okay, why is it that they’re doing one thing that we know from biology, but not the other, why is it that they initially didn’t even want to use neural networks to start with? And when I started to understand their history, I had like so many moments of yeah.  </p>



<p>Robin    00:08:54    You know, these kinds of problems I’ve seen before we had that in biology. We had a time when people felt like the human brain is so amazing and our thinking and learning, you know, it got to the genome, you know, beginning it wasn’t of course even not known what the genome looks like. Um, it had to be learned, right? And so this debate on, on learning on nature versus nurture on how much can actually get into a network via growth process. And to what extent you would even need, it has been there in the neurosciences as well, um, some time ago, um, but in one form or other, it’s still there. And so this is very interesting, right? So to see them, the parallels between those fields to see how, um, from my perspective, to some extent, the AI community is actually retracing. Some steps that historically neuroscientists have taken made me wonder that, um, you know, there is something again that we need to talk about. We should, and I wondered how much we are. And it turns out there was very little crosstalk. And so this is how the book originated.  </p>



<p>Paul    00:10:03    So I want to talk just for a moment more about the book itself before we get into the ideas that are in the book, because, uh, I’m curious for one thing that was a very rich and detailed history that you told of the developmental process, was that a lot of work or is that, um, was that easy to piece together? Because I, I know the AI history, or I knew the AI history quite well because it’s been told a few times, but, uh, and, and I even learned more about it through your writing, but what I didn’t know about was, um, that rich history in, um, developmental neurobiology, did you just know all that, or was that a lot of research that you had to do to, to write that  </p>



<p>Robin    00:10:42    It’s both, I mean, it came first of course, right? I mean, it is my field. Um, but you’re actually touching quite an exposed nerve. I mean, if we go into, into, if you open textbooks, there’s a lot of knowledge that is being communicated without communicating the field’s history. And there are some debates that just appear ludicrous today, right? I mean, the, the, the, the whole debate, you know, this is how the whole problem, how many moment, many of my lectures and the book start is that people couldn’t even agree whether a neuron is a physiological unit, because nobody could understand how we could have so many of those things wiring together. Right? And so this seems, of course, it’s a completely settled debate. Of course, we know that the brain is made up of neurons and their physiological units, and somehow they need to connect up to make something that works and appear smart to us.  </p>



<p>Robin    00:11:28    Um, but this, you can only understand if you actually have the historical context. And of course I knew some historical context, um, but probably not more than any other, somewhat trained developmental biologists. And when I started worrying about my own field, this actually happened before any deep diving into, into AI history. And, you know, how, w where are we at right now? Why are we just collecting gene after gene and molecular function after molecular function and publishing more papers every year than ever in the history of developmental neurobiology before? And are we actually getting closer to understanding how that thing is put together? So when I started worrying about this and wondering about this that’s of course, when I, when I initially went back in the history of where this came from, and there’s a very cool story to be told that’s of course, in the book of, of an interesting break point in history where a very famous neuroscientist, Roger Sperry went ahead and said, you know what?  </p>



<p>Robin    00:12:31    The embryonic development will make a brain out of individual neurons — live with it — and there gotta be molecular interactions that ultimately define the development and the growth of that network. And that was at a time in the thirties, forties and fifties, predominantly when most scientists, including the famous, even more famous supervisors of, of, of this, uh, gentleman, um, were on the other side of the spectrum and said, uh, you know, it gotta be all somehow induced by some kind of plastic form of learning. And, you know, it was a field of psychology brain wiring was at that time, uh, the subject of psychology and not the subject of, you know, today, like molecular geneticists, they didn’t even exist really at that time. And so, so this is where it all started and, uh, it’s been good and bad. Um, of course we know that, you know, there are a lot, some say an infinite number of instances of individual molecular functions in different contexts and specific cells and specific animals at specific developmental stages.  </p>



<p>Robin    00:13:44    I mean, you know, there’s no limit to the depth of this that you can study and publish papers on, but how the genome actually does that, you know, famously the genome contains one gigabyte of data, and they have people in the AI field like Schmidhuber and others who actually use it as an argument and saying, look, you know, one gigabyte of data that can encode anything. So, you know, we don’t even need to look there. Clearly we only need learning. We don’t need the genome, but then again, of course, what we all doing, maybe not looking at the whole forest at a time, but always like looking at some individual leaf or needle of searches, one tree insight that forest is looking at how the thing unfolds. And we do know that an apple seed will grow an apple tree. This is the job of developmental biology and the development of the brain is no different. And so, uh, this is basically where we’re coming from, and this is where the history became so important and has a huge impact on what we do today.  </p>



<p>Paul    00:14:51    So, so I will read from a quote from your book, I guess, to start us off here, because, uh, just dovetailing off of what you were just mentioning this then is the core question of our workshop. What difference does it make for the intelligence of a neural network, whether connectivity is grown in an algorithmic process that is programmed by evolution versus only learned based on a designed network with initially random connectivity. So you were just talking about people like Schmidhuber, um, and not to single him out because it’s the entire AI community, essentially that it’s, um, after reading your book, it’s, it’s, it’s just a curiosity that, uh, we all — I’ll include myself, right? Because, you know, at first pass it’s like you have this brain, you’re the network. And then all you need to do is learn from, from there. And that’s what intelligence is about. Uh, but your book makes the argument or asks the question, uh, that maybe the growth process itself from the genetic code, uh, is an important part of that process. And of course there are debates in, um, the deep learning field, uh, how, how important, you know, inductive biases are and the architectures that you use and whether that matters. Uh, but, but I w I would guess that you would say that that’s, that’s not enough.  </p>



<p>Robin    00:16:07    Yeah, exactly. That’s exactly what I would say, but I would also say it’s a step in the right direction, you know, ideas that like convolutional neural networks that are basically mimicking, you know, a little bit of the wiring of a visual cortex in mammals, um, or, you know, the new proposal that just made somewhat of a splash by Jeff Hawkins in his Thousand Brains, um, is of course that, you know, we got to design these things like the cortex, and then you have like all these cortical columns, and then they can copy it and vote and all these things, right. And these are all very good ideas. Um, but they’re basically coming like tiny, tiny steps back from like the purely random network. And of course they’re still designed. So in biology, how you got to the cortical columns in the first place, it was of course through growth process.  </p>



<p>Robin    00:16:57    And the fact that there are cortical columns contains information. It’s not just, you know, randomly connected network that we have in our brain. Otherwise most of my neuroscience colleagues would be out of a job very quickly. They’re studying circuitry, we’re studying how the neurons are exactly put together. And it’s fabulous. I mean, you can study things like, like, uh, motion detection. Um, so there’s a beautiful example by understanding how exactly different types of neurons or certain delays and certain, you know, conduction velocities and, and synaptic strengths, and then some state dependencies where you have some neuromodulators that whole populations of neurons suddenly have a lower threshold, all kinds of stuff. And when you see how all of this is put together, suddenly you understand, you can even build a computational model based on that. And it tells you, yeah, you know, this network can see motion, but this is very different from teaching a completely random connected network motion detection, right?  </p>



<p>Robin    00:17:59    So the, to teach the randomly connected network will not lead you to all the subcellular connectivity. That is very specific that I just try to just, you know, like outline, describe, or the molecular aspects, which are, um, things like, I, I even mentioned something like a neuromodulator, um, these are molecules that are diffusing in whole areas of the brain and changing synaptic weights. And this is simply not included in the modeling of synaptic weight changes in an artificial neural network. So evolution has found solutions to problems that the brain can solve, like motion detection, like you name anything up to cognitive abilities in the human brain or down in insects. You know, my favorite example that I’ve seen is the Monarch butterfly that flies these 3,000 miles. You’re crazy about butterflies. I really love those. They do so much. They’re like half a gram, and they can do so many things.  </p>



<p>Robin    00:19:06    If you wanted to train a neural network that is randomly connected to all the things that butterfly can do to achieve butterfly intelligence. I can tell you, we are far away from that, but, you know, without straight, this is the, this is the, this is the idea, right? So clearly learning from biology has become more accepted than ever in the history of AI before. Um, Jeff Hawkins famously was unhappy early on as a, as a young person, before becoming a billionaire that he wants to learn from the brain. And, you know, there’s a whole history, and this is why I tell that history, right in the book in AI research saying, you know, we don’t need all this messy wet stuff. And, you know, all these idiosyncratic solutions that evolution may have found a way we can design something from scratch. That’s better. And now we’re at a time where we kind of, you know, dipping in.  </p>



<p>Robin    00:20:00    I mean, when I say we actually talk about the AI community, dipping into ideas from the brain, like, you know, maybe we need cortical columns, but it’s really just the tip of an iceberg because the brain is not just simply cortical columns. It has all the molecular beauty that defines how individual neurons communicate with each other. And there is so much information, not just in the chronic activity of a specifically wired network, but also in the molecular composition that you cannot, if you want to simulate the way the human brain works or a butterfly brain works, just reduce to synaptic weight changes. And so this is where I’m coming from. Well, I, I don’t know how much I agree with you that it’s more acceptable  </p>



<p>Paul    00:20:47    Now to include biological detail because there’s, and you document this in the book as well. There is, um, a constant drive in the AI community to abstract out as much as possible, right? So the idea that they, that you would need to include growth, and we’ll talk more about the details there in a moment, the idea that you would need to include growth from the gene must be horrendously, uh, unattractive to someone in the AI community. Who’s just trying to get their neural network to learn something, right?  </p>



<p>Robin    00:21:17    Yes. It’s very unattractive. Actually, we came, we did the experiment together with colleagues, just how unattractive it is. Um, if you just want to learn a specific task. And this brings us to a very interesting topic, right? I mean, artificial neural networks, um, based on reinforcement learning have been at this point more successful in almost any individual task a human could do that. I could imagine. I mean, obviously they can play better chess. They can do better visual image recognition. They can do, you know, better solutions to the cocktail party, problem, auditory, prison, whatever, right? So individual things, you can train these things to become better. If you now want to train the same networks, something else, um, you quickly have the problem of catastrophic forgetting. Um, and the AI community is trying to address that with, with deep learning approaches. And there are some good proposals.  </p>



<p>Robin    00:22:12    Now, the, the bottleneck in teaching in network, any of these things is still the training, obviously because the design is largely random, right? I mean, you may have some pre connectivity like in a convolutional or recurrent neural network, but really the key bottleneck is learning. So reinforcement learning takes a lot of time and energy to get that thing to be really good. Once it has learned it, you can just, you know, um, uh, deploy it and then it can decide whether it still should learn anything or not. And, uh, you know, but once it has learned something, it just can do, right. It can recognize images. It can make predictions about, you know, who should be your spouse or your next soap. Now, if you want to do this same job for a specific task with a neural network that is not trained by learning, but that is trained by a genome that produces the network.  </p>



<p>Robin    00:23:12    And then once you grow that network, you have evolutionary selection for one that works back to the genome and iterations of this process, then you will soon find out that the time and energy that takes is even enormously bigger. So let me, maybe it’s really worth saying one more word about this, the AI that we know today, everything in any of these big Silicon valley companies that we’re all familiar with, all the stuff that we’re scared of or impressed with, it’s all, um, trained networks that have been trained in one or the other form, big data or reinforcement learning, deep learning stuff. Right. Um, but there is an AI community that actually does a very different type of learning of artificial neural networks. And it works like this. You take a genome, the genome defines, you know, in the easiest case directly synaptic weights.  </p>



<p>Robin    00:24:10    It’s not how biology works, but that’s the easiest approach you can take. You can say, I have one gene per synapse, if you will, right? And then you, you can basically fill the synaptic weights of a weight matrix of, uh, of a recurrent neural network. Um, and then you can have that thing, do something, for example, let a little agent find a path through a maze or do image recognition, anything you want. And then if it does it, did it, well, then you take more of the genome that was at the base of this. And if it didn’t do it well, you just mutate more of the genome at the base of it, and then you do it again and then you do it again. And then you do it again, just like backpropagation is an iterative process where you train and train and train and classic, deep learning and reinforcement learning is actually very similar to the process.  </p>



<p>Robin    00:24:56    I just told you, right? Because you only learn from the end state of the system. So can you actually train the network, not by learning, but by keeping on randomly mutating, that genome that feeds the synaptic weight matrix, but that’s still without a developmental process. And if you now add a developmental process, this thing gets very quickly, even for the biggest computers on earth, out of hand, you’d have a genome that has, say, you know, a few hundred genes, or maybe even just a few dozen. And then you basically feed that to, into some gene regulatory network that may have to go through a few hundred iterations of a developmental process that leads to the numbers that you fill in the weight matrix for the recurrent neural net. Right? Then you finally have a network that can do a task, perform something, see whether it’s good, and if it’s good, it’s good.  </p>



<p>Robin    00:25:48    You keep more of the genome. And if it was bad, you mutate more of it. Imagine this iterative process. So it’s a huge computational effort is orders of magnitude more computational power needed to even simulate a laughingly simple version of a genome and developmental process in an iterative evolutionary selection process. And the outcome is at this point in time, never better than the deep learning stuff. So therefore nobody’s doing it right. That’s not quite true, right? I mean, there are some academic scientists doing it, but then the question becomes, you know, where are we in this artificial neural networks based on deep learning. I mean, before there was deep learning, right before we had like this humongous amounts of data, um, and, and faster computers also were not successful. That’s when symbol processing logic had its heyday in AI. So maybe today we’re at a time when computers are still not fast enough, maybe we need quantum computers or something to actually simulate the evolution of the growth process and neural networks.  </p>



<p>Robin    00:26:58    So not deep learning, but, but, but, um, neuro-evo-devo learning, if you will, of artificial neural networks, and then they suddenly will become powerful. That’s one thought and just the last thought, and then you need to stop me because otherwise I keep on talking. It’s also a big question of what we’re trying to achieve, arguably, to achieve a single task. I don’t see why this enormous effort that I’m talking about here right now would be better if you just need face recognition is, you know, the deep learning is amazing. And I don’t see how this, this, this like orders of magnitude more computational effort evolution of the neural network would do that one task better. But this of course brings us to the question of, you know, artificial general intelligence and where we’re really going with this. Maybe you need that more, much more effort process. If you want to go beyond single task artificial intelligence.  </p>



<p>Paul    00:27:55    That was a lot that you just talked about, maybe where we could start, uh, is the concept of, uh, unfolding information and algorithmic growth, right? So you had mentioned that there’s not enough what you alluded to, that there’s not enough information contained in the DNA, in our DNA to encode all the connections, uh, synaptic connections in our brain, uh, it’s orders of magnitude, less than you would need to encode the entire structure of our brain, but that through the process of, uh, genes becoming proteins and transcription factors, uh, and, and through the developmental process, through which you call algorithmic growth, uh, that information unfolds and that essentially encodes the, uh, program that results in our connected brain. So could you talk about the concept of unfolding information and algorithmic growth?  </p>



<p>Robin    00:28:50    So the, the, the, the words are the best. I could find unfolding information and algorithmic growth, but they’re just putting a label, um, uh, on, on something that we clearly observed. Uh, we know that if we look at an apple seed, we can get all the information out that’s in there, it’s in the sequence of the DNA. You know, maybe some lipids around it are important. We need to know some physical laws and, you know, based on experience that you’ve seen it before, that if you put that seed in the ground, that would be an apple tree one day, you know what it will be like, what it will look like. So in the apple seed, there is no way to read that it will be an apple tree. Um, the only reason why we know that this is what happens because we’ve seen it happen before. If you wouldn’t know, then the seed, the apple seed, will not reveal the secret. You can only compare it to other sequences you have and what they did in the past, but that’s the same sleight of hand, right? That’s the same cheating based on previous outcomes, even if  </p>



<p>Paul    00:29:57    All the DNA and, and know everything about the contents of the apple seed. Yeah.  </p>



<p>Robin    00:30:01    Alright. So this is what I’m saying, and this is controversial, correct. Clearly there are more optimistic, um, molecular geneticists than me that feel like one day, if we just know enough, you know, we just will understand just how this happens. And so, um, this is what brought me to try to find out whether there is any more solid way, any mathematical way. Um, is there something in science that tells us whether or not something like this can be unpredictable, that you have a simple code, right? Let’s just call it one gigabyte of genetic information. Simple. I mean, it’s not amazing, but just for, you know, in relation to describe, let’s put it like this, describing the DNA sequence in our genome, or in an apple seed compared to describing every single neural connection in your brain, or even the branching pattern on an apple tree is like, you know, it’s like comparing almost nothing to an enormous amount of information, right?  </p>



<p>Robin    00:31:02    So clearly something happens. See if clearly during growth, you know, there’s, there’s more in your brain, than you can read in a sperm and an egg. And so I try to find examples. So I talked to mathematicians and they looked in other fields again. And so I came across something that I’ve known for a long time, but it’s not been obvious to me where the connection lies. And I guess it’s a connection that, that still requires some explaining. There are examples of very simple rules, very simple codes that can lead to a lot of what we like to call complexity. And the example that I used in, in the book, uh, are cellular automata, right? Stephen Wolfram made them quite famous with his book A New Kind of Science. And he showed that there are types of one dimensional cellular automata. I’m not going to go into details.  </p>



<p>Robin    00:31:51    These are super simple rules. You can do it on math paper. It’s like, you know, they’re deterministic, they always produce the same thing. They’re boring in many ways, but he could actually show that with a super simple rule set that I can write down in one line. You know, if this, then this, if this, then this, if this, then this stuff, um, and it’s just black and white squares, if you keep on applying the same rule again and again, and again, you’ll grow a pattern that never repeats. And that will literally grow with infinite time to infinite complexity. And for one of those rules, there is actually a mathematical proof by a coworker of Wolfram from, from the nineties already, that shows that it is undecidable, which is, which is math speak for unpredictable. So this may actually sound funny to you, but I think the answer is: this is what is called a universal Turing machine. It can contain in its pattern every single computation that you could possibly do in math. And this proof shows that it shows that a very, very simple rule set can produce infinite complexity, it’s the smallest known the simplest known universal Turing machine in science today,  </p>



<p>Paul    00:33:06    This is rule 110, right?  </p>



<p>Robin    00:33:08    This is rule 110, the cellular automaton,  </p>



<p>Paul    00:33:11    Which by the way, I think would be a pretty good band name,  </p>



<p>Robin    00:33:16    I guess. So, yeah, 110. This is a, is a number, you know, that’s the emergency number in Germany also.  </p>



<p>Robin    00:33:26    Yeah. So, um, sorry. Uh, and it is, you know, it produces infinite complexity and we know that you can’t predict what comes out of it. So what that means, you know, this was, I think I’m making it too complicated yet. It’s very simple. You can literally do this on paper and everybody, you know, in grade one, you could already do this, like draw a line after line after line, and you will find that the pattern never repeats. And it’s beautiful. Um, and you may ask, well, could I have predicted, is there any kind of math that would allow me from just knowing the code, what the beautiful pattern in the end would look like, and now you see the analogy, right? This is what biologists, and we all would like to know, is there any math that would allow us from an apple seed or a human sperm and egg to predict without any previous knowledge of outcomes, what comes out of it, what brain wiring it will have.  </p>



<p>Robin    00:34:20    And so for this super simple mathematical concept for this rule 110 cellular automaton, we know there cannot be any way, any analytical math to calculate what say row number 1000 looks like there’s only one way to do it. You need to let it grow. You need to let an amount of information unfold that you then need, if you want it to describe that end point bit by bit would be quite a lot. But if you would only want to describe the information needed to grow, it would be very little, but you can’t predict from the little information what the end point would look like without actually growing it. So this is why I talk about unfolding information, and this is why I call it algorithmic growth, which is, you know, just a, a simple description of what we’re seeing here, right? It’s algorithmic because you use the rule set again and again, and you grow something and there’s no shortcut to that.  </p>



<p>Paul    00:35:19    But so in the example of the cellular automaton, this is a very simple system, right? And the idea in your book is that DNA doesn’t encode the end point, but it encodes the algorithmic, the algorithm, uh, to grow things and DNA, and the developmental process are way more complex than a cellular automata. Um, and one of the daunting things is that let’s say we could go a lot of different directions, but one of the daunting things is let’s say, you know, your DNA encodes a protein that protein has a w what were you searched for is the function of that protein, right? But through the, uh, time and energy in the algorithmic growth process, the quote-unquote function of that protein varies depending on different contexts and different stages of development. And of course, then you have all of these things interacting. So somehow the algorithm is encoded in the DNA and development takes care of the rest.  </p>



<p>Robin    00:36:19    Yeah. Isn’t it beautiful? It’s, it’s a problem. It’s just, it’s so supremely non-intuitive right, right. How we know it happens. I mean, there’s, there’s no, you know, there’s no magic. We know that you have a seed, you have, uh, you have an egg and sperm. Um, and we know that given enough time and nurture, you know, something beautiful will develop and developmental biologists have just been studying how that happens, right. Instances of this snapshots of this,  </p>



<p>Paul    00:36:50    But in principle, a one cannot look at the DNA code and infer the algorithm. Right. So, yeah,  </p>



<p>Robin    00:36:58    I actually don’t know that. Right. So the reason why I like this simple rule one 10, and you’re right. It’s of course, much simpler. That was ridiculously much simpler to the extent that it really is not a good model for brain development at all. It’s just an example that shows that tiny amount of information, even deterministically and the same rule applied again. And again, I mean, the simplest possible thing, if you put in enough time and energy can produce something of literally infinite complexity that contains everything, any possible computation. So my argument is kind of that simple thing already can produce infinite complexity. Then we should definitely, at least not be surprised that something that’s so much more complicated, like a genome with a so much more complicated and prolonged and protracted developmental process. Like, you know, what happens for nine months in the womb.  </p>



<p>Robin    00:37:50    And then for many years, thereafter can lead to quite remarkable what we like to call complexity in brain wiring. So what’s so Supreme really non-intuitive about this is where does information come from? If the stuff in the genome is so little and what I need, the information I need to describe the network, connectivity is so much where does it come from? And this is really the core of understanding the algorithmic growth process and the time and energy it takes. So there’s a lot of beautiful discussions we could have now. And the physicists who are listening to this will know of course, a lot about this, right? I mean, the fact that you can describe entropy in many different ways and find it, um, and you know, you can describe the information content of, uh, of, uh, heat exchange between my room and the outside and so forth.  </p>



<p>Robin    00:38:51    Um, the time and energy you put in puts in information. So this is not easy to explain, but this is of course what we know happens, right? So it’s not like I’m saying something outrageous. We know that, you know, there’s a seed, there’s an apple tree, all you needed in the meantime is time, energy and water and sunshine. So just information theoretically, what that means for brain wiring and how much information there is in an actual wetware biological neural network, I think should not be underappreciated. The amount of information that has grown into that thing while you were nine months in the womb and by your, you know, growing up as a, as a toddler and later teenager, uh, with all the characteristics that, um, you have at these developmental stages that we immediately recognize, you know, nobody mistakes, the teenager for a toddler behaviorally should not be under appreciated and neither should be learning.  </p>



<p>Robin    00:39:59    But, you know, the, this is basically the trying to find the sources of information. And then coming back to your original question of our statement about pragmatic approaches in AI, that of course must try to shortcut this, right? If you want to just make a deep learning neural network that recognizes know that helps your business grow, um, you know, you want it to work. And so you’re not going to go through all these processes and you don’t need to, but then the question is, can that thing ever be what our brains are? And, um, that’s where I think is no shortcut.  </p>



<p>Paul    00:40:44    So you mentioned learning there and, and connectivity. So I’ll ask about learning first. You, you consider, so, so the modern AI begins with learning essentially. I mean, there’s some, like you said, there’s, uh, some dealings with architecture and, uh, how many units to use, et cetera. But then the almost sole focus in deep learning is the learning process. But do I have this right? That you see learning as a continuum of the algorithmic growth process? Or do you see it as a, you know, is it separable? It’s not like, so what I’m guessing is that you see, um, uh, our continued learning throughout life. And, um, as we develop, these are not separable processes from the, uh, algorithm algorithmic growth process. It’s all one big  </p>



<p>Robin    00:41:33    Process. That’s a question. Sorry. Yeah, it’s a, it’s a, it’s a, it’s a wonderful question. And it’s a big distinction between what we know about biological brains and artificial neural networks, artificial neural networks, however close. You want to make the design to the brain. They still have an on switch. And then there is this break to talk about, and then learning starts. So there’s a design period. And then there’s a learning period. Biological brains do not really have that. Um, that is of course, a period when you do not yet have learning. That’s when the neurons are in a state where they not yet excitable cells, they are not yet connected early in embryonic development. You have like all kinds of other developmental process that have to start making that network. So you could say there’s a break in the sense that there can’t be learning yet while the connectivity develops.  </p>



<p>Robin    00:42:23    However, the moment the neurons start making connections, things start happening. Part of the developmental process of every neuron is that it becomes an excitable cell. They will start to spontaneously excite each other. We now know that a large part of very early brain development are activity waves that sweep through parts of the brain, both in an insect as well as in the human brain. And these activity waves are the brain learning from itself already prior to even input. So part of the purely genetic program, no learning added, no environmental information added, part of the purely genetic program is already that the neural network starts talking to itself. And that’s part of what changes its connectivity, even before you’re born. And even before there’s any input, the moment there is input, you’re still inside the genetic program in the sense that the way evolution selected for the developmental, the genome that encodes that developmental program is that there is a time when for development to conclude properly.  </p>



<p>Robin    00:43:36    Certain input is absolutely needed. I mean, there are horrific experiments, I don’t even want to tell you about them, to deprive a human of a certain input afterwards. Um, and then certain things will never develop. And, and there are critical periods as they’re called in biology, that if then the input doesn’t happen as part of the algorithmic genetically encoded growth process at that right time, if you don’t, you know, if you’re not talking to, if you don’t get visual input, if you don’t get auditory or olfactory input, certain things, um, can never even be made, um, recovered. And they become part of the growth process. So the genetically encoded growth process continuously partakes and accompanies the learning process. The moment the network is a connected entity, and this includes activity before there’s any environmental input before learning from any environmental outside information before any nurture, if you will. But it also includes that nurture has to be right then and right there as environmental input, um, as part of that period of the growth process, uh, and they continue our entire life in one way or the other,  </p>



<p>Paul    00:45:01    It seems like such a delicate process that, you know, there needs to be certain things happening at certain times within or else the algorithm, um, doesn’t function properly. Right. And yet, uh, we also seem to be quite robust organisms. How do we reconcile those two things? Because th I mean, at first pass, doesn’t it seem like, well, you know, anything goes wrong at the wrong time, in the wrong place and the wrong environment. Uh, it could go haywire and yet, um, we are surviving thriving organisms.  </p>



<p>Robin    00:45:33    Yeah, it can actually, I mean, um, you know, the, of course only see the winners walking here, right? I mean, all those experiments that evolution continuously, um, makes that don’t make it just simply not there. Uh, the question of robustness is a beautiful question that, uh, that neurobiologists, uh, both developmental as well as functional neuroscientists are struggling with. And there, there are certain features that we have learned that are key to the robustness of this whole program. And I think the most important feature is the idea of autonomous agents. The idea that an individual neuron actually knows nothing about the brain and individual neuron has its own algorithmic growth processes, kind of, you know, it grows an axon like the cable that needs to connect somewhere else. And, you know, just when it needs a partner to make synaptic connections with the it department, it happens to be exactly there, right?  </p>



<p>Robin    00:46:34    This is how the beautiful ballet of, um, uh, development unfolds. But if the partner were not there, the neuron would still run its program. And, you know, for example, in this particular example, by and large the neuron would just make a synaptic contact with somebody else. So it would not be quite right, but it would probably be much better than nothing. And you can early in embryonic development, you can do crazy experiments. I mean, so we work with flies in the lab, right? I mean, so in flies, when you develop part of the visual system, you can early in development, easily go in and just kill half the cells. And then when you look at the final outcome, everything is perfectly fine. So it just turns out that all the remaining cells during the growth process, they kind of just did what they normally do, but because there were no others that they would normally have had to compete with, which would have led to some of them dying.  </p>



<p>Robin    00:47:30    And some of them surviving, they kind of also survive and they just fill the space. So this is very robust and it’s robust because it is not a blueprint directed maker with some factory and robots that assembles it. But it is a self-assembly process of lots of individual autonomous entities and each individual neuron like every other cell in your body has the capacity to encounter different environments, to encounter surprises when things go wrong during development. And when things happen during normal function, that is of course unpredictable in an unpredictable environment and deal with it. And that’s one of the key ideas that we know is important for robustness. And it’s a very interesting concept because self-organization is therefore absolute key to the wetware neural network function of any brain, um, as it is for its development. But self-organization is kind of, you know, Matt kind of implicit in, in, in artificial neural network research, but it’s slightly avoided. So, you know, you could argue that gradient descent and, you know, parts of how backpropagation work and how neurons communicate has features of self-organization. And I think it does, but it’s not a major topic in the design and training of neural networks. When you look into the literature of the field,  </p>



<p>Paul    00:49:13    I want to back up here, a lot of the book is dedicated. You mentioned the, the axon growth process. A lot of the book is dedicated to describing, uh, both the history and the current science and the controversies of, of how axons reach out and make the quote unquote correct connections. Although you were just mentioning that there isn’t necessarily a correct connection from the start, because these are autonomous agents essentially, and they self-organize and end up and do okay. Before we talk about that process, thinking about the code of the DNA. So the book is all about the, uh, eventual connectivity of the brain, right. Brain wiring. But a lot of what is in the DNA must also be dedicated to metabolism, right? Because we have to survive in a far-from-thermodynamic-equilibrium state. Do you see metabolism and the metabolic products, right, uh, that are coexisting with the connectivity products. And that algorithm is, is the, is the metabolic code separable, uh, from the connectivity code within the algorithm. And this is, I’m asking you to speculate, unless you have a definitive  </p>



<p>Robin    00:50:27    Answer. I dare say, I have a definite answer. I’d say it’s not separate. And it’s not separable. This is a big discussion also in the larger ALife community rather than AI community about embodiment, right? The idea of, you know, is a simulation enough and how much do you need to simulate? You know, do I need to simulate the metabolic, you know, things happening at individual synapses or is it enough to just have, you know, a value for a synaptic weight, this kind of thing. And if you talk about cells acting as autonomous agents, they start to have, of course their own drives and they need their own, you know, at least minimally simulated metabolism. But, um, yeah, we, we, you know, this, this, this, this is at the heart of self-organizing versus a designed, uh, entity, but you’re asking about genes.  </p>



<p>Robin    00:51:22    And so we look at genes, of course, people have been trying to find like individual genes that just tell us something about brain wiring, right. Is there like a gene for this one synaptic connection that’s of course nonsense, because we have, depending on how you want to count, let’s just say 20,000 genes in humans, and like some ridiculous number of synapses in the brain. So how does this work? And so then people were trying to find, is there, like, you know, maybe it’s like surface proteins that sit on one cell and another, and then they recognize each other and these proteins exist, but then you find that the same surface proteins, you know, they’re also functioning in the liver or, you know, somewhere where like, uh, you know, blood vessels grow and need to branch and do things. And, and you try to find, you know, okay, this metabolic stuff, is this just, you know, is metabolism only like kidney, heart and liver, or how about the brain?  </p>



<p>Robin    00:52:17    And then you find like, yeah, of course, you know, most metabolic enzymes are actually expressed at particularly high levels in the brain. And it’s the organ that requires the most energy. And so basically the bottom line is that, you know, there are a few heart-specific genes, and there are a few brain-specific genes, but by and large, this whole question is not very good. The idea of hoping to find, you know, a gene that specifically tells me how the brain wires, there are very few genes in the human genome that will not be turned on and off that will not be read out in one way or another and one cell or the other during brain development. And it’s all part of the algorithmic growth process. Evolution didn’t care about our intuition of say two molecules that recognize each other and could be a key and lock for synaptic connectivity, even though, you know, we would love to write, we love to read papers like this as developmental neuroscientists.  </p>



<p>Robin    00:53:19    Look, I found another cell surface protein that’s exactly on that cell. And the lock to that is another protein on the other cell. And you know, both genes are in the genome, and this is how this one synaptic connectivity is wired, but, but evolution, I dare say really tried out any kind of mutation in regulatory sequences and coding sequences of any kind of gene. And as the growth process unfolds, you will find that the mutation in some ubiquitously expressed — that’s, you know, biology speak for expressed everywhere — metabolic enzyme, a mutation in that thing that let’s say changes just, you know, it increases 5% of the function that’ll turn out to be completely irrelevant for your heart and your kidney, wherever it ends up. But during brain development, there may be just this one, neuron that if you increase this particular metabolic enzyme, 5% at the exact time, when it is making a synaptic connection, it changes the speed, say because of the increased metabolic rate — the time windows are short — and say the time window of when it can make a certain connection and it will lead to less connection of one type and more connections of another.  </p>



<p>Robin    00:54:39    And that was a mild change to a metabolic enzyme. That’s everywhere in your body. And the change it may cause in the outcome is completely unpredictable, but, you know, evolution tries these things out, um, with a slightly differently wired brain. So this is why you need all of it. You need, you know, you can’t just take a synaptic weight as a number, but the information encoded, ultimately in all this synaptic connections and the way to get there required evolutionary process selection of something that worked that was not predictable, like rule 110, but that, you know, if you had enough millions of years to try out evolution on earth, um, and, uh, evolution of brains, you can figure out adaptations and changes based on mutations in many different genes, and there need be nothing intuitive about it for what scientists would like to see.  </p>



<p>Paul    00:55:44    So modern, uh, deep learning, right? It’s it begins with a network of connected artificial units. And, you know, like we’ve been talking about through a long process, the synaptic weights, uh, become, get adjusted through learning, but it starts as a neural network. And, uh, a large part of what you describe in the book is the developmental process that leads to a network, which is a bunch of neurons connected. Um, and you give multiple examples throughout the book, uh, of different connection patterns that can happen depending on the sequence of, uh, of the development that happens. But, and yet, uh, the focus is still on sort of the end, um, connectivity of the brain that you end up with a network of neurons with connections between them. Are neurons the end-all though, uh, do we need to consider things beyond neurons like astrocytes and glia, uh, is that part of the whole algorithmic growth process that, that may eventually be important for building better AI and understanding how this all works or, or do we really just need to focus on the network of neurons?  </p>



<p>Robin    00:56:52    So again, we need to come back to what we want, what is it that we’re trying to achieve to make an algorithm to predict what to buy? I don’t see why you would need astrocytes if you want to have, uh, any kind of resemblance to what you may want to call the human AI, I’m hesitating because almost I’ve tried to use the word general artificial, you know, there are so many terms, they’re all undefined, right? I mean, artificial general intelligence, nobody knows what the hell that’s supposed to be. And that has a lot to do with there being no definition for intelligence. And you know, the idea then that you want to have a human intelligence. It’s very different to me from human level. I don’t even know what level is supposed to mean, right? I can measure a level of playing chess, but I can not measure a level of being, you know, Paul — how Paul-ish is my intelligence — and I think many different, um, people will be more similar or less similar to your individual type of intelligence. And that requires your entire brain.  </p>



<p>Paul    00:58:01    One of the, uh, seemingly important features of brains is the feedback loops that occur within the, if we can just stick with neural networks, uh, the brain is a highly, highly, highly recurrent neural network. And AI is taking this on deep learning is taking this on and, and, you know, people are using recurrence. Um, but my, my question is thinking back to the algorithmic growth process and a self-organizing system, and, and actually in the book, you talk a lot about levels. So going to the DNA level, then, uh, it is interesting. And I’m, I’m going to ask you if there’s a deeper principle involved here. Uh, it’s interesting that an enormous amount of DNA is devoted to, uh, feedback in the form of regulatory proteins that feed back onto the DNA and regulate what’s being encoded by the DNA transcribed. I should say, harking back to my own molecular biology days, trying to remember the words. Um, and then through the developmental process, it’s, it seems to be that, uh, these feedback mechanisms are also, uh, you know, uh, like a majority of the processes. So is this a deeper principle that, um, within algorithmic growth to have a robust system, um, that you, that feedback is the main thing?  </p>



<p>Robin    00:59:18    I think, so it is a very important, I mean, this has been recognized, you know, he can go back to the old cybernetics days, right? This is Norbert Wiener and others who first formulated and quantitatively worked on ideas of feedback and how they, uh, determined self-organizing systems. Um, very specifically to, to the example you give, it goes back to, to, you know, everybody’s biology 1 0 1, right? The, the genome doesn’t change by and large, um, from, you know, once the, the, the sperm has met the egg, you’re kind of done. I mean, that’s your genome and every single cell in your body has it. And it’s the same genome and every single one of your cells. And of course it, you know, what, what makes the cell in your, in your, in your eye, different from one in your heart is that different parts of the genome have been read out during a developmental process.  </p>



<p>Robin    01:00:21    And that process, as we already discussed a little bit earlier, never stops. Um, we know in the brain, when it comes to learning that a to form long-term memory requires the feedback, going back all the way to the genome, new transcription of the mediator of what will become proteins, the famous RNA, and then making the proteins. And then those proteins get incorporated into whatever molecular function you need to change the physiology of the cell, many of the proteins that get this way, read out from the genome are proteins that themselves have this funky property of binding back to the genome, which then leads to yet another different type of protein being expressed. And, you know, maybe it’s not one, maybe it’s a different thousands again. And then one of those thousands is again, one that binds back to the DNA and changes it. So both the internal program that keeps on running changes the cell continuously in a feedback process and that’s part and parcel of any development or growth process, but also the environmental input.  </p>



<p>Robin    01:01:32    Once you have a neural network will feed back to that genome. And the genome is always just there. It’s like a book that’s always there and you always just need to decide what to read in that book and to access that book, it’s just enormously complicated. You can’t just open page 255. You literally need a very strange combination of say 30 different proteins that are super unlikely to ever exist at the same time in the cell. But if they do then in particular gene combination, you know, these 231 genes or something will be transcribed and you will have a new state of the cell. And this is of course what happens all the time and none of this, I mean, very few cells. There are few cells that actually are very silent in our bodies, but most cells in our bodies never, ever stopped that feedback process.  </p>



<p>Robin    01:02:27    Right? If you have an injury in your skin, goes all the way back to changing what will be transcribed, um, cells in your heart and of course, cells in your brain. So the idea of a feedback to the genetic information and what will unfold as a next step, this is basically what all of biology and all of biomedical research is about. We’re continuously studying when and how, what kind of genes get expressed. And this is such an enormous field. You know, even with this only laughable one gigabyte of base pairs in the genome, that’s not much information if you want to write it down, easy one gigabyte, but the information that can just in one or two iterations of any of the 20,000 genes that are being expressed change, what will be expressed next is just a combinatorial explosion that gets out of hand, like almost immediately, and kind of ensures that researchers will never be out of having something new to study.  </p>



<p>Robin    01:03:36    I mean, it makes sense that AI researchers would want to avoid all of that mess. Yes. Which is why you can’t design it. So the only way you can deal with that mess is you have to give up control over the design. You have to program it literally by making random mutations and hope for the best. And if they’re not good, then what comes out of it will be just not as good. And the outcome — evolution didn’t know, but evolution would just select against it — and if what comes out of it is better than, you know, you keep some of these new randomly tried out mutations and you program something better because remember rule 110, you know, it’s a proposal, but take it for, for, for, for what it’s worth now, as a, as a, as a hint, that it may just be just as unpredictable, how the genome encodes, you know, what comes out of a given genome without having prior outcome information.  </p>



<p>Robin    01:04:44    There’s no other way to program this. And, you know, if you do this, then it would be nice to at least keep it, you know, not hyper multidimensional, right? To at least have only a few genes and a few interactions. And, you know, then it’s still a lot of computational effort to simulate that, to basically do this experiment. And for every single slightest random change to see what comes out of it, to do this entire effort of, you know, nine months in the womb. And then, you know, all those crazy teenage years where you just don’t know what you want. And then finally sitting here in a podcast, that’s not what you would like to do if you have a pragmatic job to do in programming, in neural network  </p>



<p>Paul    01:05:32    And the book, I guess, I guess you get a little philosophical, uh, about generality and specificity talking about the growth and development process and how, uh, different proteins are used in different contexts at different times in different environments. And what we want to do as humans to understand what’s happening is we want to have a very general principle, right? But then it’s really difficult to say, well, you know what, I’ll just read what you write in the book. This is a bit of a conundrum we can choose between fewer general mechanisms that explain more instances, less or many less general mechanisms that explain fewer instances better, where we draw the line for generality is anybody’s guess. And this is a recurring theme of, you know, you also talk about levels and what’s the right level to explain a given system. And, and so on. Uh, I don’t know, could you, could you just comment on that balance between generality and specificity and that conundrum?  </p>



<p>Robin    01:06:34    So in, in, in, in our field of developmental neurobiology, um, you know, it’s funny how papers are written, right? I mean, of course everybody is studying like a super specific system. I mean, you know, we study flies and some other people study mouse in a specific neuron at a specific developmental stage where it makes a certain choice and you want us to tell molecularly it does that. And so of course, every single one of these scientific publications about a process like this then has to say, you know, we are looking at this super specific thing, but then the hope is of course always, yeah. He know, looking at the super specific thing, but really it’s very general, right. I think we found something in our specific instance of the problem that tells us something, how this generally works. And so, you know, everybody does that everybody then writes like, you know, this gives rise to potentially general principle operating wiring.  </p>



<p>Robin    01:07:34    And the classic ones are of course the attractive or repulsive molecular interactions. And they’re clearly part of how neurons interact and how brains are wired. You know, these are, these are words that scientists use, but of course generality is as you know, the, the sentence you just read, I hope kind of suggests they’re not black and white. I mean, what does it mean? Like, you know, this is a super general principle. I mean, I can give you the super general principle that everything that happens is read from DNA into RNA and then into a protein, and then the proteins interact and boom, you’ve got a brain. That’s a really general principle and everybody would agree. I mean, you know, this is what happens, the feedback stuff that we’ve been talking about, that’s a general principle — a general principle that you have continuous feedback of the proteins, which are the products of the genome onto the genome itself to change what next will be read out from the genome general principle.  </p>



<p>Robin    01:08:32    But of course, if I phrase it as general as that, it tells me very little about how this one neuron makes synaptic contact with other neurons. So there, I need to become a bit more specific. And then the question is, you know, how specific do I have to be about every single molecule that is there right then, and, you know, at the right place at the right time to put this thing together, to understand this instance of the problem. And then still, I want to say, yeah, but this is now a general principle. So it’s just, uh, you know, I’m not sure it’s very philosophical. It’s, it’s, it’s just an observation that, that, that we have when we’re trying to understand any system. Right. And you can always make a very general statement. That’s almost certainly true, but really they’re not very helpful anymore.  </p>



<p>Robin    01:09:21    Or you can make a very, very specific statement. That’s really helpful to understand that specific thing. But then of course, it’s not going to apply anymore to every other, you know, the things that every neuron, every, every development of a synaptic connection has in common. And then there’s things that must be different in the end. The idea that you need all of it, that you do need every single one of those molecules that have to interact that unfold specifically at some point differently at every single synapse in the brain, such that every single one is in some way, different from any other is irreducible. If you want to have that thing in the end, the brain. And so the, the selling point of, yeah, but we’re looking only at generalities, um, is helpful to a certain extent, but at some point we just have to appreciate that it’s really an arbitrary choice, how deep we look at any given synapse and at any given neuron,  </p>



<p>Paul    01:10:38    The end of the book, you argue that. So you talk about whole brain emulation, and you also talk about the brain AI interface that is oncoming with current companies trying to do this, like Neuralink, et cetera. But you argue that whole to emulate a whole brain. You actually need to do it from start from the molecular level. You need all of those details, right. Um, how strongly do you believe this? And have you received a ton of backlash on this?  </p>



<p>Robin    01:11:08    I, how strongly do I believe? I mean, the data just shows. I mean, this is, I try to avoid, I try not to believe anything really. Um, but I’m very happy to be proven wrong. The biological systems that we study are just like that, right? If you take out any component at any point, you have surprising, uh, implications. And as we discussed earlier, this is exactly how evolution programs our brains. So if you take any component out, if you simplified in any way, you can still get something that’s amazing, but it’s not going to be that thing anymore. So if you want human intelligence, you know, the argument is you need every single one of those molecules. You can’t take any of that away. That doesn’t mean that you can’t produce something more intelligent than a human in many other ways, that’s just not going to be a human intelligence.  </p>



<p>Robin    01:11:59    So, um, in terms of backlash, um, the idea is of course not very well received by the more pragmatic, um, camp of, uh, uh, deep neural network developers. Um, but they don’t mind really all that much because, you know, they know that what they’re doing is amazing. They know that what they’re doing is successful in what they’re trying to do. And, you know, it becomes a little bit philosophical again, then to talk about where the whole journey is going. So we’re, um, so, um, I don’t think there’s, so I’m not actually clashing with any of these amazing people who are developing, um, neural networks. I think we’re all equally impressed with those where I am genuinely clashing is with the prediction of where therefore, inevitably current technology of neural networks has to go. The notion that clearly the next thing that happens is even more intelligent thing.  </p>



<p>Robin    01:12:58    And the more intelligent thing will have this, this enigmatic property of being able to produce a, yet more intelligent entity. And therefore then we have superintelligence and runaway intelligence, and we have the famous omega point and we have singularity, and then they’re all going to take over. So yeah, you know, I disagree. We are, we are not just far from this, we’re just not even anywhere near the right path to anything like that. And, uh, the argument has a lot to do with what we understand about what it means to make an intelligent system, the key argument, um, I would be against this whole singularity debate if this is where our trend is going right now, which I’m not an expert at all. Right. But I mean, I’m coming from another side. And so I just, I’m just raising a voice, a critical voice here.  </p>



<p>Robin    01:13:48    And is that, um, there’s no example anywhere in the known universe of an intelligence system producing the system that is more intelligent than it, our entire discussion that we just had circled around the idea that evolution is the only thing that can program a thing like our brain precisely because of the unpredictability of rule one 10 and what the outcome is, and therefore you need the entire unfolding thing. So the, the notion that just because something is even more intelligent than us, it will automatically inevitably have that enigmatic ability to produce a, yet more intelligent thing. I don’t see why at all. I mean, why are we not able to make something more intelligent than us then we’re not. I mean, there’s no example of that in history. Um, that’s the first major criticism and the second major criticism, if you read a book like superintelligence, um,  </p>



<p>Paul    01:14:46    I don’t know that I recommend reading that book. It was a rough one. Yeah.  </p>



<p>Robin    01:14:51    Interesting. Um, I agree with you. I mean, but, um, you know, it has some, it tickles the, the senses and the excitement about the future as it has always done. Right. I mean, there are entire, absolutely unproductive branches of so-called science that have been dealing with this ever since Vernor Vinge and this whole, you know, superintelligence thing came up many decades ago. Um, they haven’t produced anything. They’ve just been talking a lot and making a lot of money. Um, but you know, one of the things that, for example, Bostrom desperately needs and just glances over in like three sentences in order to copy your brain is just the brain scanner, right. You just scan your brain and you get the information out, and then you can take that information and make another one. Well, we have just been discussing, uh, you and me the entire time, and we don’t even know what information we need to what molecular level in order to have a brain that actually functions. So, so, so what is it that you are supposedly scanning in theory, at least, which is practically absolutely pipe dream, absolute pipe dream, but in theory, at least you could do like a, like a, you know, molecular scanner that takes every single molecule and makes a copy of that. And then you have that other brain, but that’s of course not what they mean. They, they, they all mean that clearly it will be, it’s gotta be enough to have something like say, you know, states of synapses and some digital representation thereof that’s a pipe dream.  </p>



<p>Paul    01:16:25    So I I’m taking it. You’re not a fan of the idea that one could slowly, let’s say, let’s, let’s go with neurons, right. Replace our neurons with, uh, mechanical neurons, um, very slowly. And then we would have a functioning brain that has half, or, you know, however many, however much neurons make up our brain, right. That, uh, that wouldn’t be functioning well, that we’re not anywhere near the, uh, sophisticated level that it would take to create an artificial neuron that would, that would, um, be able to function holistically.  </p>



<p>Robin    01:17:01    I don’t want to be quite as critical as that. I’m actually very optimistic and very impressed with current machine brain interfaces. As you know, we can now do extracellular, like, you know, little electrodes and thousands of them into, say, somatosensory cortex. And that’ll allow somebody who has lost the ability to move an arm or a leg to move a prosthetic device. And this kind of technology is super, I mean, it’s fantastic and it’s going to improve and it’s going to get better. And it makes perfect sense for companies like, you know, there’s the BrainGate initiative. There is Neuralink that, of course, you know, cause it’s Elon Musk, made more headlines, but it’s not really all that new even, no, it’s not, you didn’t roll your eyes there. I was expecting an eye roll. I often, no, no, no. I mean, I’m, I’m, I’m actually trying to be positive here and I’m really, I love these approaches.  </p>



<p>Robin    01:17:59    I think it’s awesome that they’re doing this. And of course, what it’s, what it’s designed for right. Is, um, if we just leave the, the, the, the, the, the science fiction out is at the moment, tetraplegia, like patients who really can’t move their arms or legs, and, you know, they’re locked in syndrome patients and so forth. If you can have any bionic device that communicates with the brain that helps these patients bring it on. Yeah. And if you’re blind and you can put a little camera in front of your eye and connect it to the visual cortex, and you can, you know, you’re not going to see as you and I are, but you see it as something again, it’s fantastic. And there’s no reason to not assume that this is gonna become, you know, exponentially better in the next years. I’m sure it will.  </p>



<p>Robin    01:18:50    It’s going to be wonderful. And it’s gonna come with its own challenges and there’s going to be some critiques and, you know, will other Borks know, finally coming, and there’s going to be some concerns and there’s going to be some failures, and it’s going to be some successes and you know, how any technological advance goes and it’s all good. It’s all, it’s all part of it. So this is going to happen and it’s going to be great, but this is not what we had been discussing. And this is not what Bostrom talks about when he has a brain scanner to take a, make a copy of your brain. We’re talking about, you know, this is, can you get, do you even know what kind of information you would want to get out of a brain to make that brain or to make any brain, you know, what, what makes Paul Paul, you know, can you just copy that?  </p>



<p>Robin    01:19:35    And then another person can be Paul. Um, this is a whole other level. The amount of information we’re talking about here is something that we really just don’t even understand. And if you just want to bring it on a very technical basis, right? I mean, when we’re talking about a few thousand electrodes and all these things do, it’s super impressive, but you know, these are extracellular electrodes somewhere in a brain region where no one knows what the hell the neurons are doing individually, where individual neurons can die and everything is, and you just get field potentials as it’s called from like these areas of the brain. This is not the same thing as neurons that are intricately wired via all the molecular glory of synaptic connectivity inside an actual brain. They’re valiant approaches and efforts, and they’re, they’re to be applauded, and they’re going to be big successes, but they’re not a path to making a human brain.  </p>



<p>Paul    01:20:40    So Robin, there are a ton of other topics that we could go into from the book. Um, I’m aware of the time though, one of the, one of the cases that you make in the book is, uh, uh, a case for the utility of cognitive bias, right? So I’m not going to ask you about that, but what I’m going to ask you about, um, thinking about cognitive bias is during the process of writing the book and, and thinking about these things, and, you know, in the beginning, we were talking about what led you down this path, has your view of intelligence changed, uh, through this process, or is your cognitive bias, uh, stuck where you had the same view of what intelligence is and what it means, et cetera, uh, from, since before you were writing the book,  </p>



<p>Robin    01:21:28    You know, that the embarrassingly safest answer is that I can’t possibly know because I’m sure I’m somewhere stuck. Right. And I’m, you know, caught in my own brain and my own biases and my own growth history. Right? So I, I come to the topic of biases through the growth history, right. Which is, which is, um, uh, serendipitous and idiosyncratically individual and makes you you and me me. And of course, new information coming into a brain is not compared on an even footing with the information that is already there. I mean, we know this, um, and that has a lot to do with what, you know, how you have used your brain in the past. Um, let’s just not go into all the examples that leap to mind right now about, uh, about, you know, data unbiased, um, beliefs that people, uh, have right now, this is how the brain works.  </p>



<p>Robin    01:22:22    So, you know, um, this is a dangerous area to be in, you know, they actually, I mean, this is not my field, but they’re, they’re, they’re, they’re, they’re amazing experiments in psychology out there that actually show that people who are trying to not think about these things and people who are not trying to worry too much about themselves are actually more successful, you know, to be ignorant to your own. Uh, you know, you can look up these things like the embarrassing, uh, questionnaires. So it’s a very difficult and dangerous area to go in, right to the, you can go in a spiral down in self-doubt very easily. Um, if you question, data-based, every single part of your growth history with a brain that happens to function based on that growth history. So, um, has my view of intelligence or, uh, any other aspect of the book changed? Yeah,  </p>



<p>Paul    01:23:17    Well done. So the way that I asked the question, it sounds like I’m asking, what was your view before and what is your view now? That’s not actually what I’m asking, because you know, like you’ve already said, we don’t really know what intelligence is. What I’m more asking is, do you feel like your approach to understanding intelligence has changed throughout the process? So you can answer, I thought this before, and now I think this, but that might be an impossible question. Of course,  </p>



<p>Robin    01:23:45    I think, I think I’ve become more humble. I mean, if I, if I dig into my idiosyncratic past, um, I remember being a young 20 something year old scientist who thought he knows everything,  </p>



<p>Paul    01:24:00    But that’s what got you to where you are.  </p>



<p>Robin    01:24:02    I’ve never been more insecure than ever my life that I’m now. Um, you know, maybe that’s a good thing. I, yeah, it got me to where I’m at exactly. I’m humbled the, the, the process of writing that book and, and, and making the connections that are trying to make as a humbling experience, because you have these moments, when you realize parallels in history, you see that all these things have happened before, and you find these references in people who have thought these thoughts before. Um, there’s so much out there there’s so much beauty and so much knowledge in the literature, people who have, you know, if you just go into what people thought, if I could have gotten completely lost just in the cybernetics years, between the forties to sixties and, and all the thoughts that people had back then, that Ross Ashby and so many others, and it’s humbling, right?  </p>



<p>Robin    01:25:05    Because then here you are, and you’re writing this book based on, you know, there’s gotta be something that we need to talk about because we should learn from each other in the AI and neuroscience communities, based on the idea that, you know, we have a genome and you don’t, and then you dig into this and it’s of course, a bottomless pit. And so many thoughts have been thought, have been thought before. And I’m with every minute that I’m spending on a project like this, changing my brain in a certain direction and not another and time. So scarce. Yeah. I’m running out of time every day, as, I guess we are right now in our conversation, you know, if I want to investigate something, I need to write this chapter, you know, the temptation to just get lost and, and dive into this, uh, infinite depth of almost infinite depth of knowledge that is out there, it’s a humbling. And so then you, you know, you, you kind of, I find myself then disciplining myself and saying, okay, I have one week for the same. Then I have one week to interview people and to learn about this. And you know, this is how far I go. And, and of course, if you do it well, you must be after this one week at a state where you realize, oh my God, there’s so much more than I initially even thought that could be known that it must necessarily be humbling.  </p>



<p>Paul    01:26:34    So one of the things that you did allow yourself to dive into in the book is, uh, Roger Sperry and his work, and also Mike gaze. And you talk about their differing ideas in the book and go into some depth about it. Maybe the last thing that I’ll ask you, you talk about how there are differences in scientists, right? Some are more vocal and some are quieter. Mike gaze was a, a quieter scientist who didn’t self-promote as much, and not necessarily that Roger Sperry self-promoted, but he was more opinionated and willing to vocalize those opinions. More, both great scientists. Right. I  </p>



<p>Robin    01:27:13    Think it’s fair to say. Yeah.  </p>



<p>Paul    01:27:14    Yeah. The question is, uh, how self-promotional as a scientist, should one be, so now, you know, I’m asking someone who’s written a book and needs to promote it as well. Right. Or, you know, hopefully we’ll, we’ll sell some copies. Right. But is it better to be wrong and popular or well-known, or is it better to be right, but unknown some career advice for, for, for those homemade,  </p>



<p>Robin    01:27:36    Oh God, this is so difficult. I can tell you from the bottom of my heart, that I did not write this book with the intent to, um, uh, you know, make it a New York times bestseller, I would like it to become, and I’m actually delighted to see that it is being picked up more than I initially could have even hoped. Um, so it’s been doing very well, and I’m very, very happy about that, but, um, you know, writing the book is one way of being vocal, but the example of scientists you give right now is also about just how you present data. There’s no way in science to present data without interpretation, the way you presented the claims you make bringing us back to the discussion we had before about generality. Um, but also the manner in which you do this is, uh, has an enormous influence on how it is perceived.  </p>



<p>Robin    01:28:46    And I’m afraid that there are many examples in the history of science where a really good idea, and the person who actually got something right, has not been corrected to this day. We always say that science self-correct in the end. And I believe to the extent that I’m capable of believing anything that, that you know, is likely true in the limit of infinite time, but it’s scary how many processes, uh, many ideas have not been corrected and people actually got forgotten, although they, they set the right thing, right. Um, for that reason. So this is very tricky. I can tell you that I basically did everything wrong. I did write the book. So maybe that’s, you know, in itself something. Right. But, um, because I always felt that science should be something that is promoted by the value of the science itself, which happens through scientific publications and peer review, and then reviews about that work and discussions within the scientific community based on scientific publications themselves.  </p>



<p>Robin    01:29:56    Um, I, until I think two days ago or so didn’t even have a Twitter account. Oh, you’re on Twitter now. I’m not really. So we made an account for my lab. So we just published a beautiful paper 21st of December. And so I decided, you know, the he’s in your lab has now a Twitter account so that we can at least tell our colleagues, we published this paper, but this is literally, I’ve sent one tweet in my entire life. Um, that’s a really bad idea to day. If you want to write a book and if you want it to sell well, you better have a good following. You better have a Facebook presence. You better have a LinkedIn account. You better have an Instagram following. And I literally have none of these things. That’s great. Not even LinkedIn, you’ve caved in now. Well, so we see this.  </p>



<p>Robin    01:30:51    Yeah. You know, how, how am I morally, you know, I don’t want it to be poorly superior. Right? I mean, I don’t want it. It’s just choices we make. And so, um, yeah, there are ways how you can try to sell whatever you want to sell it more. And clearly I have not tried. I mean, there is no advertisement. There is no, uh, Twitter. There’s no tweet. There is no advertisement. There is no Facebook. There is no Instagram that smoking. There’s nothing about this book. Um, other than that, you know, here is, this is what I wanted to tell you. I did this bit of work. I put all my heart and a lot of work in it over three years, researching the history and the current state of how developmental neurobiology and artificial intelligence, see the information problem, the question of how does information get into a neural network? How does a neural network grow smarter? And this is what I wanted to do, and I wanted to contribute this and I’m proud of it. And now, you know, live with it. And if somebody wants to read it and it develops its own life, you contacted. And you’re asking me questions that belie, you know, deep knowledge of, of many of the ideas in this book. And so that makes me super happy. And maybe that’s better than the short tweet, you know?  </p>



<p>Paul    01:32:12    Well, Robin, you should be proud of the book. Um, I didn’t mention the way it’s laid out is a series of 10 seminars. And, um, before each seminar, uh, you have this, you have these different, um, characters like neuroscientists and a robotics engineer and, um, you know, various personalities, um, dialogue with each other. So in Gödel, Escher, Bach, Hofstadter uses this kind of form, but it really doesn’t work like it was with the turtle tortoise and the Hare, I believe. Uh, not for  </p>



<p>Robin    01:32:46    You liked it. Interesting to me, honestly, I have to tell you, it’s been 20 years ago. It’s been 20 years since I read it. So maybe I should look at it again, it’s a bit different, but is that, that he used, he does this really fancy, funky thing where, where they actually talk like in the, like a Canon of . Right. So it’s, it’s very symmetric and it kind of, um, and you know, it also just probably serves a little like relaxation, right purpose that, you know, between like heavily loaded chapters that it’s nice to relax a bit, um, in, in the self assembling brain, uh, the, the semi, the, the, the dialogues a different, they have a different origin and a different idea. Um, the, the reason why they’re in there, they’re not in my original book proposal and they were not in my original draft.  </p>



<p>Robin    01:33:41    The reason why they’re in there is because this is what my notes look like. So when I got just satisfied with my own field, as we started this discussion, I felt like it can’t be that we’re just studying molecule by molecule or building a parts list. And we don’t even know what the end point is. I’m not sure what we’re missing there, but I wanted to know what could be out there. So I just started to go to other conferences. I just went to late artificial life conference and just, you know, let’s see what these people are saying. Right. And, and it was, it was eye opening. It was beautiful. And I met amazing colleagues that I’m working together with right now. And I had discussions with them. And so then whenever, and of course, then when you already have the idea of the book in your mind, and you go to this conference and you just, you know, Chet it up, people say, I come to you and then it just, you know, start to have a discussion.  </p>



<p>Robin    01:34:32    And of course I asked them certain questions, and then, you know, some people are amazing and you just get into this. And then I would always run back to my hotel room and just write down the conversation. And, you know, so as to not to lose the arguments as to not to lose, you know, I asked him this, and then he said that, and I asked her that, and then she actually said, that’s not even a good question. Do you need to do this, this, this or that? And so it was very clear that, you know, people are always talking based on their own growth history in their own worlds. Right. I mean, in the beginning, this discussions are very tricky, right? Because I’m asking one thing, but I’m getting a completely different answer and vocabulary that I’m not necessarily understanding. And then I did this in my own field and, you know, I could ask the same questions. And so I basically started to have, so I had a lot of notes from discussions and all these characters were there. There’s the robotics engineered. There’s the developmental geneticists. There’s the neuroscientist. Was that  </p>



<p>Paul    01:35:30    More fun to write, easier, to write harder to write than the rest of the  </p>



<p>Robin    01:35:34    Text. So this is where I’m just getting it right. So the, the, the, initially I didn’t read it as early as I tried to get the essence from my notes. And I try to write like a normal book. Yeah. And it was very difficult because what you lose is the, the way they talk coming from the different place. Right. If I say something as a developmental neurobiologist and try to describe something and deep learning expert tries to talk about it, we talk differently. And the way we talk is just loaded with our own history and our own way of talking. And the way we even then try to communicate becomes quite interesting. And it takes time for us to find each other. And so this is what happened, right? So I had these notes as a discussion. I tried to distill it, but, you know, just to circumscribe basically where people are coming from is so much harder and cumbersome, and it didn’t read well that in the end, I just felt like, you know, just let these people speak.  </p>



<p>Robin    01:36:33    And so I just started to write the discussions between those people. And it was much easier because then of course, you know, the developmental geneticists can say these outrageous things that I wouldn’t even as an author dare to dare to write in a book, because I think this is like, hardcore is too much, you know, it’s not that deterministic and so forth, but as a character, I can have a developmental geneticists say all these crazy things. Um, because that’s where people are saying, yeah. And then I can have a robotics engineer, of course, come in and say, you know what? We don’t need all your wetware. And it’s all nonsense. And we can do this all much better. You know, I can, I don’t need a bird with all this stuff. I can make an airplane that flies much faster, but these things to say, these things is best said, if you have characters who are themselves and have their own growth history, if you will. And then of course, that became a beautiful challenge over 10 dialogues to have them find each other. Right? So in the first dialogue between them, they’re there, you know, they’re all talking cross purposes, right? They’re all just, nobody understands each other and they don’t even appreciate each other. And so of course, what I hope happened to them is that in the end, they understood each other a bit better.  </p>



<p>Paul    01:37:43    Robin, thanks for writing the book. Thanks for unfolding information with me here today and spending your time and sharing it with the podcast listeners. You’ll be getting a tweet from me soon about the book  </p>



<p>Robin    01:37:55    And you know what I’m going to do. I’m going to retweet that. That might just be the second tweet that I will ever have.  </p>



<p>Paul    01:38:04    Yes, you are on top of your game, sir. So thanks, Robin. It’s been fun.  </p>



<p>Robin    01:38:09    It’s been awesome. Thank you very, very much.  </p>

</div></div>


<p>0:00 – Intro<br />3:01 – The Self-Assembling Brain<br />21:14 – Including growth in networks<br />27:52 – Information unfolding and algorithmic growth<br />31:27 – Cellular automata<br />40:43 – Learning as a continuum of growth<br />45:01 – Robustness, autonomous agents<br />49:11 – Metabolism vs. connectivity<br />58:00 – Feedback at all levels<br />1:05:32 – Generality vs. specificity<br />1:10:36 – Whole brain emulation<br />1:20:38 – Changing view of intelligence<br />1:26:34 – Popular and wrong vs. unknown and right</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/d3f34f68-4e14-4eb8-a96f-5efd468a3ff8-124-Peter-Robin-Hiesinger-public.mp3" length="95781247"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Robin and I discuss many of the ideas in his book The Self-Assembling Brain: How Neural Networks Grow Smarter. The premise is that our DNA encodes an algorithmic growth process that unfolds information via time and energy, resulting in a connected neural network (our brains!) imbued with vast amounts of information from the “start”. This contrasts with modern deep learning networks, which start with minimal initial information in their connectivity, and instead rely almost solely on learning to gain their function. Robin suggests we won’t be able to create anything with close to human-like intelligence unless we build in an algorithmic growth process and an evolutionary selection process to create artificial networks.







Hiesinger Neurogenetics LaboratoryTwitter: @HiesingerLab.Book: The Self-Assembling Brain: How Neural Networks Grow Smarter


Transcript

Robin    00:00:03    There’s only one way to do it. You need to let it grow. You need to lead an amount of information unfold that you then need. If you wanted to describe that end point bit by bit would be quite a lot. But if you would only want to describe the information needed to grow, it would be very little, but you can’t predict from the little information what the end point would look like without actually growing it. And the genome is always just there. It’s like a book that’s always there and you always just need to decide what to read in that book and to, to access that book. It’s just enormously complicated. You can’t just open page 255. You literally need a very strange combination of say 30 different proteins that are super unlikely to ever exist at the same time in the cell. But if they do,  



Speaker 2    00:00:58    This is brain inspired.  



Paul    00:01:11    That was the voice of Peter Robin Hiesinger, who recently authored the book, the self-assembling brain, how neural networks grow smarter. Hi everyone. I’m Paul. And today I talk with Robin about a handful of topics in the book. Robin is a neurobiologist or a neuro geneticist, more specifically at free university of Berlin studying among other things, how DNA and the developmental process encodes the wiring of brains in the fruit fly Drosophila. The central theme of his book is that current artificial intelligence and perhaps current neuroscience theories are leaving out an essential part of what makes us intelligent. And that’s the growth and development of our br...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:39:27</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 123 Irina Rish: Continual Learning]]>
                </title>
                <pubDate>Sun, 26 Dec 2021 15:46:13 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-123-irina-rish-continual-learning</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-123-irina-rish-continual-learning</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/12/art-123-01.jpg" alt="" class="wp-image-1616" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img width="322" height="400" src="https://braininspired.co/wp-content/uploads/2021/12/IR_Mila_July2020_portrait.jpg" alt="" class="wp-image-1617" /></div>



<p>Irina is a faculty member at MILA-Quebec AI Institute and a professor at Université de Montréal. She has worked from both ends of the neuroscience/AI interface, using AI for neuroscience applications, and using neural principles to help improve AI. We discuss her work on biologically-plausible alternatives to back-propagation, using “auxiliary variables” in addition to the normal connection weight updates. We also discuss the world of lifelong learning, which seeks to train networks in an online manner to improve on any tasks as they are introduced. Catastrophic forgetting is an obstacle in modern deep learning, where a network forgets old tasks when it is trained on new tasks. Lifelong learning strategies, like continual learning, transfer learning, and meta-learning seek to overcome catastrophic forgetting, and we talk about some of the inspirations from neuroscience being used to help lifelong learning in networks.</p>



<ul><li><a href="https://sites.google.com/site/irinarish/">Irina’s website.</a></li><li>Twitter: <a href="https://twitter.com/irinarish">@irinarish</a></li><li>Related papers:<ul><li><a href="https://arxiv.org/abs/1806.09077">Beyond Backprop: Online Alternating Minimization with Auxiliary Variables</a>.</li><li><a href="https://arxiv.org/pdf/2012.13490.pdf">Towards Continual Reinforcement Learning: A Review and Perspectives</a>.</li></ul></li><li>Lifelong learning video tutorial: <a href="https://www.youtube.com/watch?v=5wwbOBFBMbs">DLRL Summer School 2021 – Lifelong Learning – Irina Rish</a>.</li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Irina    00:00:03    We are not the first one asking the question about what intelligence is. People think there’s a first one to ask the question or to build something for them, something, and it’s not the first time to put it mildly. It’s always trained on capacity versus complexity, and you want to find the minimum cost and minimum capacity agent that’s capable to Concord the complexity or whatever future tasks that agent will be exposed to. But if the agent feeds the wall, the agent will have to have the ability to expand itself and continue learning what they’ve learned from two years, trying to do the new AI project. The idea that it was first of all, much less well-defined then AI for new era here, you like search for a black cat in the black room, and you’re not sure if the cat is there. That’s  </p>



<p>Speaker 2    00:01:11    This is brain inspired.  </p>



<p>Paul    00:01:25    Hey everyone, it’s Paul happy holidays. I hope you’re well. Today I speak with Irina Rish, who is currently at the university of Montreal and also a faculty member at Mila Quebec AI Institute. And I wanted to have Irina on for multiple reasons. One of which is her interesting history, uh, having been kind of on both sides of the AI and neuroscience coin. So she’s also worked, uh, at IBM as you’ll hear working on healthcare and also in neuroscience inspired AI. And we have a pretty wide ranging conversation about much of her, uh, previous and current work. So we talk about, uh, her work on alternatives to the backpropagation algorithm. And we talked about her ongoing work on continual learning, which is kind of a big topic in AI the...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Irina is a faculty member at MILA-Quebec AI Institute and a professor at Université de Montréal. She has worked from both ends of the neuroscience/AI interface, using AI for neuroscience applications, and using neural principles to help improve AI. We discuss her work on biologically-plausible alternatives to back-propagation, using “auxiliary variables” in addition to the normal connection weight updates. We also discuss the world of lifelong learning, which seeks to train networks in an online manner to improve on any tasks as they are introduced. Catastrophic forgetting is an obstacle in modern deep learning, where a network forgets old tasks when it is trained on new tasks. Lifelong learning strategies, like continual learning, transfer learning, and meta-learning seek to overcome catastrophic forgetting, and we talk about some of the inspirations from neuroscience being used to help lifelong learning in networks.



Irina’s website.Twitter: @irinarishRelated papers:Beyond Backprop: Online Alternating Minimization with Auxiliary Variables.Towards Continual Reinforcement Learning: A Review and Perspectives.Lifelong learning video tutorial: DLRL Summer School 2021 – Lifelong Learning – Irina Rish.


Transcript

Irina    00:00:03    We are not the first one asking the question about what intelligence is. People think there’s a first one to ask the question or to build something for them, something, and it’s not the first time to put it mildly. It’s always trained on capacity versus complexity, and you want to find the minimum cost and minimum capacity agent that’s capable to Concord the complexity or whatever future tasks that agent will be exposed to. But if the agent feeds the wall, the agent will have to have the ability to expand itself and continue learning what they’ve learned from two years, trying to do the new AI project. The idea that it was first of all, much less well-defined then AI for new era here, you like search for a black cat in the black room, and you’re not sure if the cat is there. That’s  



Speaker 2    00:01:11    This is brain inspired.  



Paul    00:01:25    Hey everyone, it’s Paul happy holidays. I hope you’re well. Today I speak with Irina Rish, who is currently at the university of Montreal and also a faculty member at Mila Quebec AI Institute. And I wanted to have Irina on for multiple reasons. One of which is her interesting history, uh, having been kind of on both sides of the AI and neuroscience coin. So she’s also worked, uh, at IBM as you’ll hear working on healthcare and also in neuroscience inspired AI. And we have a pretty wide ranging conversation about much of her, uh, previous and current work. So we talk about, uh, her work on alternatives to the backpropagation algorithm. And we talked about her ongoing work on continual learning, which is kind of a big topic in AI the...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 123 Irina Rish: Continual Learning]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/12/art-123-01.jpg" alt="" class="wp-image-1616" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img width="322" height="400" src="https://braininspired.co/wp-content/uploads/2021/12/IR_Mila_July2020_portrait.jpg" alt="" class="wp-image-1617" /></div>



<p>Irina is a faculty member at MILA-Quebec AI Institute and a professor at Université de Montréal. She has worked from both ends of the neuroscience/AI interface, using AI for neuroscience applications, and using neural principles to help improve AI. We discuss her work on biologically-plausible alternatives to back-propagation, using “auxiliary variables” in addition to the normal connection weight updates. We also discuss the world of lifelong learning, which seeks to train networks in an online manner to improve on any tasks as they are introduced. Catastrophic forgetting is an obstacle in modern deep learning, where a network forgets old tasks when it is trained on new tasks. Lifelong learning strategies, like continual learning, transfer learning, and meta-learning seek to overcome catastrophic forgetting, and we talk about some of the inspirations from neuroscience being used to help lifelong learning in networks.</p>



<ul><li><a href="https://sites.google.com/site/irinarish/">Irina’s website.</a></li><li>Twitter: <a href="https://twitter.com/irinarish">@irinarish</a></li><li>Related papers:<ul><li><a href="https://arxiv.org/abs/1806.09077">Beyond Backprop: Online Alternating Minimization with Auxiliary Variables</a>.</li><li><a href="https://arxiv.org/pdf/2012.13490.pdf">Towards Continual Reinforcement Learning: A Review and Perspectives</a>.</li></ul></li><li>Lifelong learning video tutorial: <a href="https://www.youtube.com/watch?v=5wwbOBFBMbs">DLRL Summer School 2021 – Lifelong Learning – Irina Rish</a>.</li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Irina    00:00:03    We are not the first one asking the question about what intelligence is. People think there’s a first one to ask the question or to build something for them, something, and it’s not the first time to put it mildly. It’s always trained on capacity versus complexity, and you want to find the minimum cost and minimum capacity agent that’s capable to Concord the complexity or whatever future tasks that agent will be exposed to. But if the agent feeds the wall, the agent will have to have the ability to expand itself and continue learning what they’ve learned from two years, trying to do the new AI project. The idea that it was first of all, much less well-defined then AI for new era here, you like search for a black cat in the black room, and you’re not sure if the cat is there. That’s  </p>



<p>Speaker 2    00:01:11    This is brain inspired.  </p>



<p>Paul    00:01:25    Hey everyone, it’s Paul happy holidays. I hope you’re. Well today I speak with Irina Reesh, who is currently at the university of Montreal and also a faculty member at Mila Quebec AI Institute. And I wanted to have Irina on for multiple reasons. One of which is her interesting history, uh, having been kind of on both sides of the AI and neuroscience coin. So she’s also worked, uh, at IBM as you’ll hear working on healthcare and also in neuroscience inspired AI. And we have a pretty wide ranging conversation about much of her, uh, previous and current work. So we talk about, uh, her work on alternatives to the backpropagation algorithm. And we talked about her ongoing work on continual learning, which is kind of a big topic in AI these days. So as you probably know, uh, deep learning models suffer from what’s called catastrophic forgetting where when you train the model to do one thing really well, uh, and then you train it to do another thing.  </p>



<p>Paul    00:02:25    It forgets the first thing and humans don’t suffer from this problem. And it’s an important problem to tackle moving forward, uh, in deep learning. And we discussed many of the methods being used to try to, uh, solve continual learning and some of the inspirations from neuroscience along those lines. We also talk a little bit about scaling laws, which is roughly the relationship between how big and complex a model is and how well it performs over a range of tasks. We also talk about definitions and Irina’s definition of artificial general intelligence and how she views the relationship between AGI and continual learning. And we talk about a lot more, so you can learn more about Irina in the show notes at brain inspired.co/podcast/ 1 23 on the website. You can also learn about how to support the show on Patrion and possibly join the discord community of Patrion supporters who are awesome.  </p>



<p>Paul    00:03:21    Thank you guys. And thank you for listening. Enjoy, you’re actually kind of a perfect fit for this podcast, uh, because on the one hand you have a background in a lot of, uh, computer science and I guess your early work was in applied mathematics. So you kind of come from that side, but I know that you’re interested in using, among other things among the many things that you’re interested in in using some principles and ideas in neuroscience to help, uh, build better AI. So could you just, um, talk a little bit about your background and how you came to be interested in, uh, being inspired by, by brain facts and et cetera?  </p>



<p>Irina    00:04:00    That’s a very interesting question. Indeed. Sometimes I ask myself and I tried to dig into the past. The question is called for in the past. We want to go, uh, indeed, um, a couple of years ago I joined Mila and university of Montreal. But before that I was at DBM research and I was there for quite a long time initially in their department of computational biology where I did indeed focus on neuroscience, neuroimaging and applying statistical methods, machine learning, AI to analysis of brain data. So that’s mainly where I kind of really got, I guess, deeper into neuroscience, psychology psychiatry type of topics. And I’m still collaborating with a group that’s my kind of long-term collaborators in computational psychiatry and neuro imaging, uh  and his, um, uh, his friends. So that was really exciting. And that’s, I guess, where I actively was, uh, pursuing this idea of the intersection between AI and neuro. But I think the interest in that intersection started long time before I even joined IBM. And I usually I realized that I could track it back to my, uh, I think, uh, elementary or even middle school years. I used to go to muscle Olympics in Russia. I don’t know if it’s interesting it’s at this too long of an answer.  </p>



<p>Paul    00:05:30    Well, let me ask you this. So, so when I was in college, there wasn’t even a neuroscience program, um, uh, degree, uh, and I don’t know that I would have been about, you know, I started in aerospace engineering and then moved on to molecular biology. And I don’t know if neuroscience was available as a program, whether I would have actually chosen it. But, uh, so I was going to ask if that’s what was limiting, if you had that kind of kernel of interest, why then go into applied mathematics? Uh, that was your first degree, right?  </p>



<p>Irina    00:05:58    Right. Yeah. I mean, I probably should explain and did, uh, what I wanted to mention, uh, the reason was from practical perspective, I was going to math Olympiads and I was quickly realizing that, um, you don’t have that much control over your brain and like, you want to solve a problem. And then you kind of hitting the wall. It’s like, what’s going on there. And sometimes it works. Sometimes it doesn’t, you want to understand like why and how to make it work better. And then you see people around you, some of them struggling unable to solve anything. Some of them are able to solve like more than you do. And again, you wonder what the difference, what does it take? Then you start reading books, like, uh, Pólya’s “How to Solve It” — how to learn how to solve problems and this and that. But it really came from very practical goal.  </p>



<p>Irina    00:06:49    Like I need to figure out how to solve all those problems quickly because I want to be in Delhi. So what do I do? My brain doesn’t seem to be cooperating. So what should I do? Like how do I make it work? So you start digging into how to make brain work. And then you run into books accidentally, which say, well, whether machines can think, it was Russian translation. I guess, of the famous Turing’s work. That gets me into thinking about AI when I’m like 14 or something. That’s why I go to computer science. Uh, the closest to computer science at that point, uh, kind of in Russia was like applied math, essentially applied math slash computer science, but formally applied maths. Um, and that’s kind of, that’s how it goes from there. So like my focus on computer science and AI actually came from the really very practical goal. Like, I need to understand how this brain works, so I make it work better. That’s pretty much it. Then you realize that it’s like biology, psychiatry, neuroscience, and many other things that study brain, like whatever goes.  </p>



<p>Paul    00:07:57    Okay. So, so at IBM then you were part of a team, I think you said you were in computational biology division and say you were a part of a team that was sympathetic to, uh, using principles from biology to help make machines better. Right.  </p>



<p>Irina    00:08:13    Okay. The focus of the department was not on machines. The focus was on health care. So the focus was on how to make, uh, humans healthier. Uh, that was computational biology, neuroscience and neuro imaging, kind of, um, um, group’s focus. So it’s not the focus of that group was not really on AI. And I was kind of back and forth between focus on AI and computer science and machine learning to biology. And back originally I started in machine learning group, uh, for distributed systems, uh, changed names a few times. And I moved to this computational biology center. And then in the past few years before moving to Mila, I moved again from computational biology department to AI department of IBM. So, as I said, I was kind of iterating between the two multiple times. The focus in those past couple of years before moving to Mila was indeed to bring, uh, kind of neuroscience, inspirations and ideas to improve AI. So that was my, my latest focus at IBM was indeed on what you’re asking about. And that was neuro AI, uh, kind of, uh, project between IBM and MIT. Uh, that was going on. That kind of remained part of my focus when I joined Mila and was part of their direction for that, um, seven year program, uh, that I’m leading, the Canada Excellence Research Chair. The neuro AI component is one of the kind of, uh, axes along which things are supposed to be developing.  </p>



<p>Paul    00:09:56    We’ll see. Okay. So this is what I wanted to get to because, um, I, I’m curious about your experience about your colleagues and their sort of opinions about using, uh, neuroscience or neuro inspired, um, tools, uh, to build better AI because that’s very much the industry side of it. And you have a lot of like passionate opinions about whether we should be looking to the brain to build better AI or whether we should just continue to scale with what we have. Right. All right.  </p>



<p>Irina    00:10:24    I know both sides really well. We just ran our scaling laws workshop last weekend from blond. Yes.  </p>



<p>Paul    00:10:31    Okay. So, so what is your IBM colleagues, if you feel free enough to talk about it, what was kind of like their reception of your focus on using neuroscience?  </p>



<p>Irina    00:10:40    Uh, again, the colleagues in AI department, or colleagues in the bio department? Well, I think actually this whole neuro AI — neuro for AI and AI for neuro — all these ideas actually grew out of my multiple interactions and discussions with my friends at the computational psychiatry and neuroscience department. And primarily Guillermo Cecchi. And I think what really also helped to shape my views was the interactions, which involved, not just discussing, uh, technical aspects of AI or technical aspects of brain imaging, or even neuroscience, but there was lots of philosophy in those discussions because luckily Guillermo had first degree in philosophy and the second in physics and then went to neuroscience. And I think that really made him like stand kind of apart and ahead of many colleagues. So I’m really, really, really grateful for those discussions because they helped me to shape my views as well.  </p>



<p>Irina    00:11:44    So what I’m trying to say that while the healthcare department, the comp bio department was focused on healthcare applications, the idea of using neuroscience and biology inspirations for improving AI, um, was very exciting for at least several people there. And it’s still exciting. And we would like to kind of do more along those lines. When I moved to AI department at IBM, uh, again, it was kind of a mix of opinions because just like in the field in general. Um, and I agree with that view, it may not necessarily be the case that the only path to intelligent, uh, artificial intelligence systems is, um, mimicking the brain. Moreover, like even my colleagues, neuroscientists never said that we have to mimic the brain. The intriguing question is like, what is the minimum part of those inspirations that might be necessary to transfer? And that’s just like,  gives us example about airplanes, not flapping their wings, right?  </p>



<p>Irina    00:12:54    So you don’t have to copy the biology. Exactly. And yet you want to come up with some common laws that govern the flight, right? Um, some aerodynamics of intelligence in our case. And that’s the tricky part. And that’s, I think where everyone agrees. So nature found the solution. Uh, there are some properties of that solution that might be specific to evolution and nature, and perhaps we can abstract from them, but there are some parts. The question is which ones that are probably invariant or necessary for any intelligence, finding those invariant properties is a good open question. And I think that’s subconsciously everybody doing AI is trying to do that, but I definitely, I’m not in the camp of first. You need to completely understand how brain works and only then you can create artificial intelligence. I don’t think so. Just like with airplanes and engineers there, they don’t have to be like a biologists understanding birds perfectly. Right, right. You need to understand enough, but the good question is what is enough  </p>



<p>Paul    00:14:10    Interesting because we’re going to talk about a few of the, um, neural inspirations that you have focused on. And in some sense, um, I don’t mean this is not an insult at all, but it’s almost like, uh, we are sometimes cherry picking and just trying kind of one thing at a time. We think this might be important thing that might be important. Um, when what we, what we could do, which you say is not the right path. And I agree as we could, uh, instead of cherry picking, you know, these, these facts that we’re going to discuss in a little bit, um, you really could go more all in and try to, you know, there are people trying to quote unquote, build a brain, right. And they’re still having, you know, to abstract out a lot of things. But, um, but that push would be to build in more that we know about the brain that rather than less, it seems so. Um, but I want to ask you, uh, before we move on about philosophy, I happened to see a panel that you were on. I don’t remember the source. It may have been a,  </p>



<p>Irina    00:15:11    The main conference. We had an interesting discussion about philosophy therapy.  </p>



<p>Paul    00:15:17    It didn’t go that that far, but you, you, you got into a back and forth a little bit with Surya Ganguli and, uh, who finds philosophy useless, and you made the push that, uh, in fact it is useful. So I just wanted to,  </p>



<p>Irina    00:15:31    It’s useless. You can learn from anything, but let’s not go there.  </p>



<p>Paul    00:15:38    Okay. So, so you don’t want to, um, make a, uh, a case for philosophy.  </p>



<p>Irina    00:15:44    I can make useful philosophy. I, um, I think what happened there, maybe it was as usual, by the way, it’s not specific to the panel. Uh, people mainly disagree because of differences in their kind of definitions or interpretations of terminology. And unfortunately that’s a universal problem in the field, like many concepts are not well-defined. And in general, I mean, that’s the main reason people argue because when people actually nail down details of what they are being for, or against surprisingly many cases, they all agree. So I think the, the problem was what people, uh, understood as philosophy when I say the word philosophy and it means something to me, it probably meant different things to different people. So they were really arguing, not with my point, but they were arguing with their own understanding of the word. Yeah. That’s why, because I don’t think Surya or anyone else will disagree in general that if you have different disciplines, whether it’s philosophy or neuroscience or psychology or psychiatry or any, any, any type of discipline that studied mind and its function, how it works and what are the mechanisms at different levels in different ways and philosophy is one of them.  </p>



<p>Irina    00:17:13    And even more, it’s not just philosophy. I mean, you can think about Buddhism. And I brought this example to me, it’s empirical science of how mind works, which has several thousand years of knowledge accumulated in very different terminology and so on. Uh, but that’s, that’s a data that’s that’s knowledge accumulated by people from observations, right? So there is some truth to it. And the question is like how to dig that truth out, since we coming from different fields, use different terminology again, how do you translate philosophy and Buddhism to machine learning slang, in a sense, so people will understand it, not everything there might be relevant, but we are not the first one asking the question about what intelligence is, what usually amazes me. And again, I don’t mean that doesn’t solve, but, and plus it’s very natural. It’s always happens, but people think they’re the first one to ask the question or to build something for something. And it’s not the first time to put it mildly. There are many bright minds that for many years, were facing similar type of questions just in different circumstance. So I think it might be useful to learn more about what they found.  </p>



<p>Paul    00:18:42    It does seem to be a recurring theme that, um, there’ll be a hot new trend. And then it turns out a hundred years ago, someone already had written basically the answer, you know, and laid out the groundwork, the groundwork for it that, uh, then, then we go back and, uh, something that we resolved, uh, had already essentially been solved.  </p>



<p>Irina    00:19:03    It is. I mean, it’s not specific to our field or our time. Right. It’s always been like, that’s probably always going to be like that. Uh, but, uh, that’s just why I mentioned philosophy. And, uh, also, I mean, I know, I know I essentially meant the same thing that Surya was saying himself, that we are trying to, um, kind of discover the common laws behind intelligence, uh, whether biological or artificial and kind of pushing it forward common laws behind how mind works or how it could work and, um, how you can kind of affect it in different ways. So it works differently. And I think, again, any source of knowledge about like people asking similar type of questions and finding whatever answers, any information like that you can learn from all these data. All I actually was suggesting that, yeah, let’s try to learn from all data input data being different disciplines,  </p>



<p>Paul    00:20:13    But okay. So there, there’s a problem here, right? Where, um, throughout the years, all the different dif disciplines have continued to progress and it is essentially impossible to be an expert in all disciplines. So how, you know, what’s the right,  </p>



<p>Irina    00:20:27    That’s why we need AGI and he’ll be an expert in all of that.  </p>



<p>Paul    00:20:32    And they can tell us which disciplines we need to learn, but we won’t be  </p>



<p>Irina    00:20:35    The knowledge for us and conveyed to us in understandable manner. I’m just quoting that young, short scifi story from nature, but it’s only half joking.  </p>



<p>Paul    00:20:49    Yeah. Yeah. Well, so, so you are interested in, um, that that’s a goal to build AGI and we’re going to talk, uh, about lifelong learning and a little bit, I want to ask you about backpropagation first, but would you say that’s one of your, uh,  </p>



<p>Irina    00:21:03    Uh, yes and no. AGI is not the final goal in itself. It’s an instrumental goal. The final goal, as I was always putting AI as augmented, rather than artificial intelligence, to me, just the goal of building AGI never felt truly motivating. Like why do I care about machines?  </p>



<p>Paul    00:21:32    Well, do you know what AGI even is? I don’t really know what AGI is because that’s another thing where people have different definitions.  </p>



<p>Irina    00:21:39    Yes. It’s one of those terms in machine learning, which is not well-defined. And I know that creates lots of confusion and there were two debates at Mila on the topic of AGI. There are different definitions and different people again, mean different things. One practical definition could be just stick to the words, let’s say it’s artificial general intelligence. So general means capable of solving multiple really multiple problems. To me that means general broad, versatile, which relates to continual learning or meta-learning or transfer learning, but kind of push to extremes. So like truly versatile AI that can do while, at least pretty much anything we can do, not a narrow opposite of narrow, broad general. So that can be just a relatively clear definition, at least to me of what AGI would stand for. There are many other definitions and we probably could write like a list of different ones, but I think, yeah, you’re absolutely right. It’s not the term. It’s not the mathematical term.  </p>



<p>Paul    00:22:59    Do definitions matter  </p>



<p>Irina    00:23:01    Definitions matter. I mean, yes and no again, so you can have different definitions. What matters is for people before they start kind of working together on something or discussing something to agree on definitions. Because again, the main reason for debates, sometimes unending debates is at the core that people did not agree on definitions. And what comes to my mind whenever I listened to machine learning, people debating something or pretty much anyone debating anything, the picture of the elephant and seven blind men touching different parts of the elephant and saying, no elephant disease, no you’re wrong. And funders that no, it’s, you’re wrong. And they’re all right, and nobody’s wrong, but they didn’t agree on definitions and they don’t see the full, full picture.  </p>



<p>Paul    00:23:51    That’s all I’ve come to think that the purpose of debates is to talk past one another and not progress at all.  </p>



<p>Irina    00:23:58    That’s not the purpose to me. It’s a sad reality. Yeah. You can do that. You will probably have some fun, um, maybe for some limited amount of time and then pretty much you just wasted the time and everybody moved on. So what was the point? I don’t know. I mean, Yes. If you try to learn something or converge to something or make some progress, then probably not.  </p>



<p>Paul    00:24:28    Okay. So you and I agree. That’s good. We don’t need to debate that issue then. Okay. So, um, you’ve done work. One of the ways that you have fought to use neuroscience is, uh, on the question of backpropagation and, um, maybe before, because you had, you’ve done work on what’s called auxiliary, uh, variables like a backpropagation alternative. Um, so I’d like you to describe that and you know, where that came from, but before doing that, um, could I, cause we’ve talked about backpropagation multiple times now on the podcast, I had Blake Richards on way back when, um, you know, um, uses the morphology of neurons and the, uh, electrically decoupled, uncoupled, um, apical, dendrites, and blah, blah, blah, burst, firing, et cetera, as an alternative. And now, you know, there’s this huge family now of alternatives to backpropagation. Um, I’m curious about your overall view on, uh, that progress, that literature.  </p>



<p>Irina    00:25:29    Yes. Uh, yeah, that’s very good question. And actually, in fact, we are working right now with a group of people at Mila and outside of Mila, um, on, uh, extending difference target propagation. So basically that line of work is still going on, although it was a bit in the back burner for awhile, and there are as usual, at least two motivations here, whether you come from neuroscience and you try to come up with a good model of how, uh, essentially learning happens in the brain, um, basically how the credit assignment for mistakes happens in the brain. And whether backpropagation is a good model for that, or you can come up with a better model. So this is one motivation and many people who kind of are less concerned with, uh, competitive performance of alternatives to backpropagation and more concerned with really understanding how it works in the brain.  </p>



<p>Irina    00:26:26    They focus on that. And I also totally agree with that view, as I said, I mean, there is no contradiction once you clearly state what your objective is, you cannot say that they are wrong or you are right, or vice versa because they just optimizing different function. They want to answer the question, how we best model what happens in the brain. Their objective is not to beat you on MNIST. So as long as we all agree on what objective is, it’s not wrong. It’s interesting line of research and that’s kind of initially what motivated, uh, also work on, um, beyond backprop kind of just trying to understand things better. And Blake is definitely, uh, doing lots of things in this direction and other people, but on the other hand, there is, uh, another objective. Like if you come from the point of view of AI person who says, okay, I understand I want my analogies with brain, if, and only if it helps me build more effective, more efficient algorithm.  </p>



<p>Irina    00:27:31    So when you come from that objective, you can start wondering purely computationally, what are the limitations of backpropagation and, um, what could you do differently or better and how to solve those kinds of shortcomings? And usually people were always claiming that, yeah, there is problem of vanishing gradients or exploding gradients. Yeah. There is a problem that, uh, basically backpropagation is inherently sequential because you have to compute this chain of gradients and you have to do it sequentially. But again, on one hand, brain processing is purely parallel. Second, in computers, if we were able to do it in parallel, it probably would have been more efficient and better would scale better as well. So you want this parallelism, you want to avoid possible gradient, uh, issues. Uh, so what do you do? And that’s where many optimization techniques came starting from this. Um, essentially Yann LeCun’s own thesis mentioned alternative to backpropagation that later was called target propagation.  </p>



<p>Irina    00:28:38    And all it meant is another view of the problem. So basically instead of just optimizing the objective of the neural net with respect to the weights of the neural net being unknown variables, you’re trying to find you introduce an auxiliary variables or different names. Uh, it all comes from the just three wheels kind of, uh, equality constraint there that those activations. So he then units, uh, they are equal to what do that, uh, linear function of previous led layer transformed by some non-linearity and you can play with it. You can introduce extra auxiliary variables, just the linear ones and another one, uh, another set, nonlinear transformation of those songs. Of course you can write it purely as a constraint optimization problem. You can modify constraint optimization and to just like this like ranch and whatever. So yeah, I mentioned that in the thesis and kind of was looking into that later.  </p>



<p>Irina    00:29:39    So it’s not something that was not kind of considered before. Just people didn’t really try to push, uh, directly optimization algorithms that would, um, take into account those auxiliary variables explicitly. And to me, the work from 2014, uh, was a paper — forgetting the name right now. I mean, that basically motivated us to start looking into auxiliary variable approaches. And then there was a whole wave of this optimization approaches anyway. So they all try to do the same thing. They try to introduce activations or linear pre activations as explicit variables to optimize for that would reformulate your objective function for neural net learning in terms of two sets of variables, one being your usual weights and the second being those activations, and that had some pluses and minuses as everything. The pluses would be that once you fix activations, it completely decouples the problem into local layer wise sub problems.  </p>



<p>Irina    00:30:52    So you can solve for those weights completely in parallel. Um, basically the chain of gradients is broken and it’s good. So you don’t have by definition, any vanishing gradients or exploding gradients, because there is no chain. And second thing, you can do things in parallel. So those two things are good. There is also some similarity and more kind of biological plausibility because you take into account now activations in this neural net explicitly as variables and essentially interpretation of that is also, you view them as a noisy activations, which they are unlike classical neural nets, where they are always deterministic variables, uh, deterministic versions of the real neurons. The real neurons are not fully deterministic functions, right?  </p>



<p>Irina    00:31:48    So the nonlinearity is a separate thing, but even just the fact that in artificial neural net, they’re totally deterministic. That’s also quite a kind of simplification. So, uh, there are other kind of, um, I mean, there are other flavors of this auxiliary variables and kind of target propagation methods in our, uh, kind of approach, which is essentially in line of the, um, uh, the auxiliary variable optimizations, where you can write the joint objective in terms of activations and weights. Uh, the thing here is we still use the same weights for kind of, forward, um, propagation or basically computing output given input, as well as for the optimization or in a sense like backward pass. There are other flavors like, uh, target propagation by Yann LeCun and a difference target propagation by Yoshua and his students and all flavors of methods. On top of that, they use two sets of weights, the forward weights and backward weights, which may be even more biologically plausible than those auxiliary methods that I mentioned.  </p>



<p>Irina    00:32:57    And then there is lots of kind of flavors and variations on that. And actually, it’s nice to see this subfield expanding recently, and there were some papers at NeurIPS last year and so on and support. So it’s all interesting. It has its pluses and those things such as parallelization and, uh, by definition, lack of vanishing gradients and exploding gradients, no matter how deep the network is or how long is a sequence in a recurrent net or something, those are good, but you move into different optimization space and empirically, whenever you try this methods on standard benchmarks and standard architectures, they are not always performing as well as a classical backpropagation. And that was one of the issues with the whole field of alternatives to backprop — the problem of how to make them competitive. There are multiple successes, but they are not like completely kind of putting back prop out of the picture, plus we didn’t aim to do so.  </p>



<p>Irina    00:34:11    We had some successes on fully connected architectures. We’ve had successes on CIFAR and MNIST. We’ve had some successes even on RNNs and even on simple ConvNets, but what we learned, I mean, that was good because it was the first time when you actually do see those improvements, um, in, um, uh, in the paper at NeurIPS, uh, by, um, Sergei. But, um, yeah, and, um, uh, Jeff Hinton and others, uh, they also kind of were trying different alternatives, like target prop and various sorts of that. And unfortunately they couldn’t show that it would beat, or be competitive with, a standard backprop on ConvNets on ImageNet. So there were these kind of many, uh, unsuccessful attempts. There were some limited successes, and the question is still open, whether such alternatives can become true state of art. And the hunch is, I think you shouldn’t beat your head against the wall, trying to use alternative optimizations like that on architectures like ConvNets and so on, which were so well optimized to work with standard backprop — you need different architectures.  </p>



<p>Irina    00:35:25    And maybe the fact that we really tried hard to beat backprop on classical ResNets, and it didn’t really work. Maybe that’s a problem — ResNets may not go well with it, but something else will go well with auxiliary variables and target prop. It’s a hypothesis, but I think it’s kind of something to try. Anyway. I think, I think if you make those methods work, you will get benefits of much better parallelization and scalability. You will not have this nagging issues of potentially exploding or vanishing gradients, but you will have other problems. You will possibly have convergence problems in alternating minimization type of, uh, algorithms and so on and so forth. I mean, uh, there is no free lunch.  </p>



<p>Paul    00:36:19    So since you mentioned architectures, I want to pause and ask you, um, if you think looking to the brain for architectural inspiration makes any sense at all, or, you know, because it’s a whole system with lots of different architectures interacting. And if you’re thinking like different optimization methods might work better or worse with different architectures, if, if that is, uh, another avenue where it, where we should look,  </p>



<p>Irina    00:36:47    I think it would be useful to explore it again. Uh, there is this, uh, heated debate within the field: inductive biases versus scaling the most generic architectures say transformers, or even scaling multilayer perceptron. Ultimately, it’s a universal function approximator: if you scale it enough, it probably will do anything, right. It’s probably just not going to scale very efficiently to put it mildly. So yeah, that’s why maybe transformers are better, but the question, okay, here is the thing: inductive biases or priors versus scaling very generic type of networks. Uh, I might be wrong, my personal opinion, but I think just like historically, whenever you have not enough data. So in brain imaging, sometimes it’s small data sets or in medical applications. So on so forth when you don’t have enough data, then using priors or inductive biases from the domain is extremely helpful. They take the role of regularization, uh, constraints.  </p>



<p>Irina    00:37:59    And if those regularization constraints are priors, so inductive biases, right, they help immensely and you can perform really well despite having small amounts of data. And that’s where you could kind of use specific architectures and so on. And by the way, that’s why say convolutional networks were so successful for such a long time, right? But then you start scaling, right. And the amounts of data, if you have those amounts of data, they go way beyond what you had before. Your model size goes way beyond what you had before you scale the number of parameters while maintaining like some kind of structure of the network, like to scale with some depth on something, there are many kind of important caveats here about how to do it. So, so scaling model will actually capture the amount of information while you scale data. So, I mean, there are smarter ways to do that and less smart ways, but say you do it right now.  </p>



<p>Irina    00:39:08    Uh, it looks like those priors inductive biases become less and less important. And we do have empirical evidence say vision transformers at scale in terms of data start outperforming ConvNets. And by the way, that’s why I think looking at scaling laws is so important. You have two competing architectures, you see how they scale and you see that in lower data regime, ConvNets are so much better in higher data regime, it’s vice versa. And that approach that kind of empirical evaluation of different methods architectures or whatever you compare, uh, by looking at the whole curve rather than a point — that one architecture, another architecture, one data set, and other dataset. It’s not that informative, all those kinds of tables there, plots, scaling law plots, uh, giving you much fuller picture and give you better ideas. See if you can scale what type of methods you should invest into.  </p>



<p>Irina    00:40:14    And apparently if you can scale, vision transformers would do better than that. But still the question remains. What if there are inductive biases such as maybe those brain architectures and so on that can improve your scaling exponent, essentially what it means when we talk about scaling exponent is that, um, empirically it’s found that, uh, the performance of models expressed as, um, uh, basically the cross-entropy loss on the test data, or a loss or classification accuracy on downstream tasks. Uh, they usually seem to improve according to power law. You’ve seen probably those papers by Jared Kaplan and his colleagues from OpenAI. And now, I guess, Anthropic and so on and so forth. And, uh, all those papers show you power laws, which are straight lines in a log-log plot, and the exponent of power law corresponds to the slope of that line in a log-log plot and the whole billion dollar question in the scaling laws field is what kind of things improve that slope.  </p>



<p>Irina    00:41:22    So you get better improvement in performance for smaller amount of scaling, therefore cheaper. And scaling, by the way, involves here, not just scaling the data and scaling model size, but also scaling amount of compute because you may just even keep the model fixed and data fixed, but let your algorithm compute more. And sometimes you see very interesting behavior like grokking paper from a workshop at ICLR last spring, where they just ran their method for long time, just forgot to kill it. And then there was certain phase transition from almost zero to almost one accuracy. They just managed to find some point in the search space. Yeah, but it was not intentional apparently, but it happened to be that for that type of benchmark and, uh, architecture, they used, it was a case that somewhere in the search space, there was a place, it was extremely good solution surrounded by not so good solutions.  </p>



<p>Irina    00:42:28    And if you find that place, you can jump there. And that’s your phase transition from zero to one. Anyway, we kind of trying to explore those phase transitions recently, uh, with my colleagues at Mila as well. So back to your question, the question inductive biases versus scaling as usual, um, I would say maybe inductive biases plus scaling because certain inductive biases maybe can improve the exponent and at least you want to explore that they might be useful. I wouldn’t kind of throw the water. I wouldn’t throw the baby out together with the water, as the saying goes, ah, okay, let’s just scale multi-layer perceptron yes, of course you can do that. It will be just extremely inefficient. Don’t you want more advantages scaling laws and maybe inductive biases can help you reset it. Doesn’t have to be, again, the two camps fighting each other. Although I understand it’s more fun to fight than collaborate.  </p>



<p>Paul    00:43:36    Sure. It’s more fun to do both. Right. Just like a scaling in inductive biases. The answer is always both.  </p>



<p>Irina    00:43:42    Yeah, it doesn’t have to be either, or it could be both. The question is what inductive biases help to improve scaling. And that’s a good question. It might be, it depends like, um, Jared Kaplan was presenting at our workshop a couple of times we had two workshops so far one in October, one just now last week. And again, he mentioned that in his experience, uh, again for that setting, that problem for GPT three, um, improvements due to architecture did not seem to be as significant as just kind of scaling and that’s totally fine. It doesn’t completely kind of excludes the station when inductive bias has maybe some, for some other problems would be much more important.  </p>



<p>Paul    00:44:32    How do you explore the full space of architecture as though you have some, some limited amount of exploration, right,  </p>



<p>Irina    00:44:39    Right. Um, that’s a good question. I mean, it just like miss everything, neuroscience inspired, it’s such a huge space on its own. And to be completely honest, you cannot just go and ask the scientists. What, in your opinion is the most important inductive bias that AI people should use? It doesn’t work this way. At least not in my experience because they say, we don’t know, like you tell us what you need and then maybe we can think and suggest what kind of inductive bias can best help you with what you need. So what I’ve learned from two years trying to do the neuro-AI project, the idea that it was first of all, much less well-defined than AI for neuro, where you take methods to analyze the data. That’s we know how to do. Here, you like search for a black cat in the black room, and you’re not sure if the cat is there that you would have for AI, but I think what helps is interaction with those neuroscientists.  </p>



<p>Irina    00:45:49    Let’s say, look, I need, okay, you’re asking what I need for a I’d like my system to be able to continue to learn. I mean, well, it has to be learning how to do new tasks and work on new datasets and it should keep doing this because I might have my robot walking in the wild and it has to adapt, or I might have my personal assistant chatbot and it has to adapt to my changing mental states or it has to adapt to other people. So I don’t have to keep looking into different data environments tasks and doing them well at the same time. I don’t want it to completely forget what it learned before, because it may have to return to it and may not have to remember. Um, I don’t really push for absolute lack of support. Catastrophic forgetting fast remembering is fine.  </p>



<p>Irina    00:46:45    So just like basically a few-shot learning of new things and few-shot remembering of old things would be just fine after all. I don’t remember myself, the courses I took in the years of my undergrad, but I could remember them hopefully. So you want that? How does brain do it? And that may be more specific questions you say, okay, how does continual learning happen? What are the tricks? And then people in AI actually using those inspirations. I mean, this whole, a bunch of continual learning methods were inspired by some kind of some phenomena in neuroscience, for example, um, kind of freezing or slowing down the change of certain weights in the network. If those weights are particularly important for some previous tasks in, uh, it was kind of more formalized in work like EWC, elastic weight consolidation, or synaptic intelligence. Also, uh, there are many of those regularization based approaches, uh, essentially in continual learning.  </p>



<p>Irina    00:47:53    And it’s kind of one flavor of, uh, well, very obstructed inspirations, I would say, but from this phenomenon that does happen in the brain or replay methods again. So this classical example of, um, having hippocampus that essentially forms very quickly, um, kind of memories, but then they’ve consolidated say during sleep and you kind of have this longer term knowledge and prefrontal cortex. So having this learning systems approach. Yeah. Yeah. So that is another example. Again, it’s very much simplified and obstructed. It has its roots in neuroscience, but then it kind of gives rise to machine learning algorithms like, uh, rehearsals to the recorder. So they play junior different plays so on and so forth, but they deal. Yeah. Um, there is also the third kind of direction that, uh, people take usually in continual learning. Um, more like architectural based approaches when you essentially expand your network model, expand your architecture depending on, uh, the needs of your learner.  </p>



<p>Irina    00:49:10    And that also has its roots and you can connect it to things like adult neurogenesis that even adult brains apparently do grow new neurons when they do so in specific places like hippocampus, the dentate gyrus of hippocampus, it’s still happening there. It doesn’t stop, um, at the beginning of adulthood and yes, there was dogma until two years ago or more that adult brains do not grow new neurons. Well, apparently they do in mammals, in rats, they do it in the hippocampus and olfactory bulb, as you can imagine, olfactory bulb is very important for rats and other mammals because they do use smell quite a bit. And in humans, apparently olfactory bulb doesn’t matter as much anymore, but we still have some neurogenesis happening in hippocampus. And what was interesting and we kind of did some work on top of that, had the paper about neurogenetic kind of model or the very simple version of that.  </p>



<p>Irina    00:50:17    But the idea was all the empirical evidence about neurogenesis in the literature suggests that there is more neurogenesis happening when the animal or a human is exposed to radically changing environment like different tasks or different environments in continual learning. So then it is associated with more junior neurogenesis. If the environment is not very complex and it’s not changing, you kind of, don’t really seem to need to expand capacity of your model. Like you have some new neurons being born, but they die apparently pirates. So it’s like use it or lose it. If you don’t need extra capacity, you won’t have extra capacity. If you keep challenging yourself and you keep kind of pushing yourself to extreme, so totally new situations, the new neurons will be incorporated and your hippocampus will expand somewhat. I mean, to some degree, of course, as everything. So it’s, it’s an interesting observation and it associated with possible ideas of expanding architectures in continual learning to accommodate for new information that cannot be possibly represented well using existing model. So yes.  </p>



<p>Paul    00:51:37    So let me just recap what you’ve said there, because you, you covered a lot of ground. So, um, you kind of just transitioned. I was going to ask you about continual lifelong learning and you just transitioned into it. So, and you talked about the three, um, neuroscience principles that, uh, have been, uh, implemented and the whole point of a lifelong learning that there’s this huge problem in deep learning called catastrophic forgetting where once the network is trained on one task, if you train it to learn a new task, it forget completely forgets the old task. Right. And so there’s been this explosion in, uh, lifelong learning methods. I, one of which is continual learning is that under the umbrella of lifelong learning, because there’s transfer learning, meta learning, continual learning, and now there’s meta-continual, continual-meta and on and on. Right. Okay.  </p>



<p>Irina    00:52:28    Yes. Okay. So it’s again, a question of terminology, like kind of stepping on the same rake of machine learning terminology. Again, uh, I gave a tutorial at ICML last summer, uh, anyway, actually to me, and I guess to many people, lifelong learning and continual learning are synonyms. And they all just mean that you would like your model to, uh, learn in online scenario where online means you get your samples as a stream instead of, um, I mean, you can get them as sequence of datasets or a sequence of batches or a sequence of just samples. But the point is you have that sequence of, uh, data you keep learning and you do not have an option of keeping the whole history of datasets, uh, or you might even have the option, but you might not want to constantly retrain because it’s not so efficient.  </p>



<p>Irina    00:53:31    So continual and lifelong learning in this approach are synonyms. Meta-continual, continual-meta, and so on, so forth are still within the umbrella of continual learning, but kind of different formulations of how you might go about training your system to do that. Continual learning again, as I said, uh, I’m sure people have different definitions. So in my mind, it’s, uh, a particular definition of continual learning, which just means online. Non-stationary learning by non-stationary. I mean, any change to the environment or input data, be it a change of data distribution, it’s a change of task or both. So, um, as to transfer. Okay. So transfer learning again. I have the slides in the tutorial, as well as in the class, uh, slides for the continual learning class. They all online on my web page. Uh, we, I gave it for two years in a row. It was a winter semester class, uh, 2020 and 2021, but I’m not giving it this year because I will be teaching neural scaling laws.  </p>



<p>Paul    00:54:50    That’s your new thing. Okay.  </p>



<p>Irina    00:54:51    Well, continue learning is related to that. It’s not like, uh, we completely jumped to something unrelated. It is related, uh, but with more focus on them, uh, scaling models, data and compute, and continuous learning, being a problem that you’re also trying to solve. That’s what, but back to your question about transfer, um, Matta and the Meda patients. So on first of all, I did my slides second, uh, while the slides are based on their very, very nice tutorial, uh, by, um, I think the launch from 2000, uh, 20 anyways. So the picture defines each of those, uh, problems and shows how they are different and how they similar a transfer usually assumes that you have two problems. And by learning on one, you trying to kind of, uh, trying to be able to use that knowledge, to do better on the second problem as it is not necessarily any notion of remembering or being equally interested in doing both problems.  </p>



<p>Irina    00:56:00    It’s like more unidirectional question of terminology to me transfer is a property of your model and algorithm. And continual learning is a setting in which you would like transfer to happen. Uh, which means while learning, I always would like to improve, or at least not make it worse. My performance on the past, which means backward positive transfer, or it is backward non negative transfer at the same time. I’d like to hopefully learn better and faster in the future because I already learned so much. So ideally I would like to have some positive transfer to future. And that view of, uh, not equating continual learning with catastrophic forgetting issue, but rather more general view of continual learning as a problem of maximizing transfer in both past and the future that kind of also came out of our joint paper, um, meta experience replay from 2019 with Matt Riemer.  </p>



<p>Irina    00:57:10    And I very much kind of support that view more general view of continual learning. And especially when it comes in the context of not just supervised or unsupervised, but continuous reinforcement learning as the ultimate continual learning setting. Yeah. So that’s kind of where these different aspects may align and come together, uh, to build the kind of the same, same, I dunno, one big picture of what we’re trying to achieve. Agent that can adapt and still be able to return back to Lauren, maybe right. Retraining slightly. There’s a few shorts like the flexible avoiding or forgetting and a adaptation that is fast. And they aspects such as transfer playing the key role. The meta-learning is essentially very similar to continual learning, but it does not assume that you are different data sets came in a sequence, they available to you at the same time. And you try to learn from them common model that will be very easily adaptable to any new environment or data set.  </p>



<p>Irina    00:58:27    Um, presumably from the same distribution of datasets. So that’s essentially meta-learning and by the way out of distribution, generalization is another related field, which is to me, essentially zero-shot meta-learning because out of the distribution setting is give me a multiple data sets or environments. And I will try to learn a model that, um, basically distills some common invariant robust features or in general common invariant robust predictor. So that next time you give me data set for testing that is different from my training, it’s out of distribution. And yet it shares that invariant relationships, which are essentially closely related to the causal relationships. If you give me that I will do well on that. So it’s extreme case of meta-learning because meta-learning will tell you, I want to do well on that out of distribution dataset, just give me a few samples. So they all, all this terminology in a sense comes together and has many shared, uh, aspects. It’s just, uh, as I said, unfortunate Babylon tower situation in machine learning that makes it difficult. The set of ideas is much less dimensional than the ambient dimension in terms of our terminology. So maybe a machine learning compression is long overdue.  </p>



<p>Paul    00:59:54    Well, it’s also, yeah, it’s also the variety of problem statements, but I want to ask you about that in a second, because so, so just backtracking once more. So you talked about three inspirations from neuroscience to help with continual learning. One was, uh, the variability in plasticity say you, uh, you want to remember a certain task, uh, there’s evidence that those, um, those synapses form stronger connections and are less likely to, uh, change, right? And so you lower the plasticity moving forward so that you can, uh, maintain good skills on a certain task. Right? You also talked about the complementary learning systems approach, uh, using the, you know, these two different learning and memory mechanisms. One is fast and, uh, uh, very specific and then that’s associated with the hippocampus and then there’s this, uh, general slow, um, and generalizable learning happening. And that’s associated with the, the neocortex. And of course, uh, you use replay, which has been used. And, you know, I think originally with DQN to solve the Atari games, um, and  </p>



<p>Irina    01:00:55    So much much before  </p>



<p>Paul    01:00:59    That’s. Okay. No, thank you for correcting me. I knew I was right when I said originally, I was like, Ooh, that’s not  </p>



<p>Irina    01:01:07    Good,  </p>



<p>Paul    01:01:09    But, uh, okay. And then the third was, um, essentially inspired from neurogenesis, which is the idea that, uh, especially in the hippocampus of an adult, you can continue to form new neurons and that this might help us informing new memories. So what I wanted to ask you about, um, is the issue of the facts in neuroscience, continuing to change because it’s an empirical science, right? So I thought that recently there was backlash against this idea of neurogenesis and adults. And, you know, it’s like one year milk is good for you the next year. It’s really terrible for you, uh, based on new evidence. So there are these, I didn’t know that the evidence was still pointing toward neurogenesis as a concrete fact, right. It’s because we’re always finding out new things. So the story is always being revised in neuroscience. And, um, I’m wondering if that affects, uh, your own approach, because if we hang our hat on neurogenesis and it turns out we actually don’t create new neurons or something, um, you know, then how do we move forward from there? Well,  </p>



<p>Irina    01:02:08    You think about computational, uh, I don’t know, machine learning kind of side of that. Even if it happens in a nature, there was no neuro neurogenesis to start with. We don’t care. We have algorithm that works better.  </p>



<p>Paul    01:02:24    Yeah. You can always go back to that now you’ve now you have your AI hat back on. So  </p>



<p>Irina    01:02:28    Exactly. That’s why I never have only one hat I have multiple, and I actually don’t even think it’s effective to have single hat ever. Although I understand it’s kind of, people tend to have one hat because we are, so I don’t know, we’re so kind of tied to the notion of our identities, including the identity in terms of scientific views. So Coleman it’s so dear to our heart, but in reality, the notion of identity in general is much more vague than we might say, but I don’t want to go into philosophy here because it takes a long time and we don’t have the time it’s a separate conversation anyway.  </p>



<p>Paul    01:03:21    Okay. So, um, so then coming back to all the li all of these different problem statements and how you were saying, you know, are different definitions, but really they’re all kind of converging, um, onto like a, uh, a lower dimensional problem space, given the various approaches to training and testing and metal learning, you know, all the things that you just covered. Do we actually understand how humans learn well enough, the developmental literature, like psychology, like, uh, do humans do all of these different problem statements or would it, is there? So I know that there’s a little bit of work, at least on, you know, the humans actually showing a little bit of Castro, catastrophic forgetting in certain circumstances, but do we actually understand human behavior enough and how humans learn not at the neural level, but at the behavioral level, um, to map that onto these different continual learning metal learning, transfer learning, do humans do all of that to humans? Do some of it, you know, does that make sense?  </p>



<p>Irina    01:04:21    It definitely makes sense. Well, first of all, I mean, there are many people who kind of focusing on this type of studies and development though, uh, neuroscience. Um, so I really would love to read more about that as you just mentioned earlier. Uh, no human being, unfortunately can be on top of all the literature and all the fields related to mine. So there is lots of interesting recent work as well. And I have some colleagues at UDM working on that. Uh, for example,  in psychiatry department then, uh, Elaine Miller in neuroscience department. Uh, there is a lot there about in terms of, okay, not looking at Euro, just looking at the behavior and asking the question, whether human do transfer learning, or continue learning particular settings, I think, yes, because in a sense, the notions of particular settings in continual learning, they came out of, uh, researchers thinking about like, what do we do in real life in different situations?  </p>



<p>Irina    01:05:24    Say we have robotics, uh, what are kind of scenarios there? The robot moves from one room to another room, environment changes all the settings and specifically of them, oh, now it’s not just room didn’t change, but I gave it over different tasks. They all came out of our anecdotal observations of our own behavior of like, just common sense that he’s an egg in our heads about what people do. So in a sense, yeah. I mean, whatever you have right now in continual learning fields, for example, or transfer, it came out of our knowledge about human behavior, because in a sense where else would it come from?  </p>



<p>Paul    01:06:06    Well,  </p>



<p>Irina    01:06:08    Yeah. So in this, in this sense, yeah. But, uh, more on that, like more specifically, like study is about how it’s being done and like what affects that, what kind of makes it better or worse for that we would need to talk to our colleagues in psychology and your science and read more about that. And I’m pretty sure, as I said, I mean, I’m not claiming I’m completely on top of any literature  </p>



<p>Paul    01:06:34    That infinite hats. Well,  </p>



<p>Irina    01:06:37    That would be, that would be the ultimate goal. But for that first I need to create AGI. Then you connect with CGI and then you have your argumented brain, which can finally stay on top of all that literature. That’s my true motivation.  </p>



<p>Paul    01:06:55    All right. All right. Okay. So I have one more question about, um, about continual learning and then we’ll begin to wrap up here. Someone in the brain inspired discord community, uh, asked whether the learning trajectories of artificial networks impact, um, have an impact in continual learning, right? What, how much it retains and how much it forgets over successive tasks and training. Have you, do you, have you studied the learning trajectories at all? Because that’s something that’s being looked into in deep learning theory these days, uh, as, uh, something that matters.  </p>



<p>Irina    01:07:30    Yeah. The learning trajectory. Uh, okay. The question is also, it depends on several things. It depends on say the sequence of tasks like curriculum learning, which can matter a lot. Indeed. Um, actually it matters not just for continual learning. It matters for say adversarial robustness, even it matters for various aspects of the and model, like how what’s it consists of data was even to it and how they arrived there. But of course it also depends on say particular optimization algorithm that basically different trajectory leads you to different state in the weight space. It leads you to different model or think about the different, yeah. Different brain artificial brain. Uh, and uh, of course they will different trumps of their properties in terms of forgetting. And so-and-so, I guess when we were looking into, again, I’m thinking about this matter paper Ms. Mud and others, that obviously obviously the trajectory matters.  </p>



<p>Irina    01:08:34    The question is how do you, how do you know what the local kind of constraints or local kind of ambitions you should use, uh, to push trajectory into the desired, uh, kind of direction? Because like, all we can do is just to use kind of, uh, some local information just like, as gradient is right. And I guess things that change trajectories. I mean, as I said, data can change trajectory, all kinds of realizations can change trajectory, basically regularization. So precisely things among other things that change trajectory is a lot, you have objective standard objective of your senior on that, and you’re trying to optimize for that. And then you add things, one example, say you say, I really would like to make sure that I have that positive transfer or at least not negative. Let me add as a constraint, the product of gradients or new and old samples.  </p>



<p>Irina    01:09:36    And I want the things to be aligned. I don’t know. I do not want my new gradient to point in the direction opposite of the gradient on previous samples, because that would mean I will be decreasing performance on the past task. I will be forgetting. So I’ll try to add at least locally categorizer again, like a matter of paper with smart, for example, just one example that will push my weight kind of trajectory in that direction and so on and so forth. So basically any regular riser you put there by some desirable features, but without, of course any grantee, because it’s all local, it will change the trajectory. So in a sense, the whole field of continual learning, playing with at least the regularization based field, it’s all about changing trajectories and they are changing the final solution. So it  </p>



<p>Paul    01:10:33    Wasn’t good theory about how to do that. Really  </p>



<p>Irina    01:10:37    Again, I don’t want to claim, I mean, there are, there is various work, there was a paper on them, like try to theoretical understand the continual learning algorithms, but for specific types of them, there’s this, um, what was the name of  gradient descent and, um, okay. There is various work, but uh, to me, continual learning is still the field that is lagging behind quite a lot in terms of theoretical understanding,  </p>



<p>Paul    01:11:06    I was going to ask what your outlook is for continual learning. If we saw solve continual slash lifelong learning is the same as solving AGI and, you know, are you optimistic about, you know, what’s what, what is the normal number that people say 20 years? And that’s when we’ll have solved everything it’s always 20 years or so. Right. Oh,  </p>



<p>Irina    01:11:26    Some say less than that.  </p>



<p>Paul    01:11:29    What’s your number,  </p>



<p>Irina    01:11:30    Pam.  </p>



<p>Paul    01:11:32    10 years for lifelong learning. Yeah.  </p>



<p>Irina    01:11:35    Okay. And AGI  </p>



<p>Paul    01:11:37    That equates to AGI or when we solve lifelong, is that solving AGI? Okay.  </p>



<p>Irina    01:11:43    Again, difference in terminology. I apologize to people who might like really strongly disagree with me. And I know some people who will all going to say,  </p>



<p>Paul    01:11:56    Let’s drop some names.  </p>



<p>Irina    01:11:57    I know, no, no, no, no, we’re not going to do that. But those people know, well, there is also this kind of view, uh, which is still an open question, the pure about, so what does it take to solve continual learning, whether it equates to AGI and so forth. So if we assume that AGI for all practical purposes is general artificial intelligence by junior road, it’s versatile broad. It can kind of learn to solve pretty much any task that is, as people often put it economically available. So say AGI is a kind of a model that can solve all economically available tasks, say as good as, or better than human. Uh, something like that. Uh, the question is if you kind of put that agent in the wild, it will have to do continual learning, right? So it needs to be kind of sold. The question is whether you approach solving it by trying to train that agent in continuous learning manner or as scaling, um, crowd will tell us, or at least part of the scaling crowd, not overgeneralizing that maybe it’s enough just to really pre-train a humongous foundation model on multimodal data, not just language, not just video, uh, not just the images like media or perhaps even all kinds of time series data, but once you pre-train, it, it essentially solved continual learning.  </p>



<p>Irina    01:13:31    I had this question during the workshop discussed and its ongoing debate. What I would say is that for any fixed set of possible tasks that they will give continual learner, like for example, recent submission to ICLR, uh, on, um, scaling and continual learning for a fixed set of tasks. Yeah, sure. Scaling model scaling amount and uh, diversity and complexity or information content of pre-training data will at some point cover the complexity of the fixed set of tasks and yes, you will solve catastrophic forgetting. You can capture information of all the things that you’ve been do all of them well, but if the stream of tasks and continual learning continues growing right, infinitely will your pre-train model hit the wall at some point or not. And that’s a good question. And I think it’s interplay between their model capacity. I always keep saying that also in my tutorial on continued learning capacity of the pre-trained model that you learned and the complexity of unseen part of the universe that your agent will have to adopt.  </p>



<p>Irina    01:14:50    And I would say that what you really need to look into would be relative scaling of how your model capacity, that depends on size architecture and information content of the data you’re trained on, which depends on the amount or number of samples, but other things too, how that capacity scales with respect to complexity of downstream tasks. So to me, relative scaling laws would be the most interesting thing to dive into. And I think it makes sense. It’s always trade-off of capacity versus complexity, just like rate distortion theory and information theory and so on. And you want to find the minimum cost and minimum capacity agent that’s capable to kind of work well, conquer the complexity of whatever future tasks that agent will be exposed to. But if the agent hits the wall, the agent will have to have the ability to expand itself and continue learning. So to me, continual learning is the ultimate test for anything that is called AGI  </p>



<p>Paul    01:16:03    Say that that sounds like incorporating principles of evolution into a book.  </p>



<p>Irina    01:16:09    Okay. So any pre-trained model may hit the wall and I believe it will have to keep evolving and if it won’t be able to evolve itself the bad,  </p>



<p>Paul    01:16:22    Okay. Irina, this has been fun. I have one more question for you and that is, uh, considering your, your own trajectory. If you could go back and start over again, would you change anything? Would you, um, change the order in which you learned things or order of your jobs, how would you start again?  </p>



<p>Irina    01:16:42    Ah, that’s a very interesting question. I’m not sure I have a immediate answer to that, but in one of those realities, I might’ve been taken totally different trajectory from the one I’m on right now. I probably would have been skiing somewhere in Colorado working as a ski instructor.  </p>



<p>Paul    01:17:10    Oh, I do all of that. Except work as a ski instructor. You should try what I did. Okay. I just, uh, just went uh, two days ago. Yeah.  </p>



<p>Irina    01:17:20    Look, Tim blond is pretty good too, so it doesn’t matter some good mountain and just,  </p>



<p>Paul    01:17:27    I know why don’t you come visit me and we’ll go ski and we’ll see if we can change your trajectory  </p>



<p>Irina    01:17:32    And you’re welcome to visit   </p>



<p>Paul    01:17:36    All right. Very good. Well, I really appreciate the time. Uh, so thanks for talking with me. Thank  </p>



<p>Irina    01:17:41    You so much for inviting me. It was fun.  </p>



<p>Paul    01:17:49   </p>

</div></div>


<p>0:00 – Intro<br />3:26 – AI for Neuro, Neuro for AI<br />14:59 – Utility of philosophy<br />20:51 – Artificial general intelligence<br />24:34 – Back-propagation alternatives<br />35:10 – Inductive bias vs. scaling generic architectures<br />45:51 – Continual learning<br />59:54 – Neuro-inspired continual learning<br />1:06:57 – Learning trajectories</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/338448ac-b83b-404c-92c7-1f69157d2dbd-123-Irina-Rish.mp3" length="76124999"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Irina is a faculty member at MILA-Quebec AI Institute and a professor at Université de Montréal. She has worked from both ends of the neuroscience/AI interface, using AI for neuroscience applications, and using neural principles to help improve AI. We discuss her work on biologically-plausible alternatives to back-propagation, using “auxiliary variables” in addition to the normal connection weight updates. We also discuss the world of lifelong learning, which seeks to train networks in an online manner to improve on any tasks as they are introduced. Catastrophic forgetting is an obstacle in modern deep learning, where a network forgets old tasks when it is trained on new tasks. Lifelong learning strategies, like continual learning, transfer learning, and meta-learning seek to overcome catastrophic forgetting, and we talk about some of the inspirations from neuroscience being used to help lifelong learning in networks.



Irina’s website.Twitter: @irinarishRelated papers:Beyond Backprop: Online Alternating Minimization with Auxiliary Variables.Towards Continual Reinforcement Learning: A Review and Perspectives.Lifelong learning video tutorial: DLRL Summer School 2021 – Lifelong Learning – Irina Rish.


Transcript

Irina    00:00:03    We are not the first one asking the question about what intelligence is. People think there’s a first one to ask the question or to build something for them, something, and it’s not the first time to put it mildly. It’s always trained on capacity versus complexity, and you want to find the minimum cost and minimum capacity agent that’s capable to conquer the complexity or whatever future tasks that agent will be exposed to. But if the agent hits the wall, the agent will have to have the ability to expand itself and continue learning what they’ve learned from two years, trying to do the new AI project. The idea that it was first of all, much less well-defined than AI for new era here, you like search for a black cat in the black room, and you’re not sure if the cat is there. That’s  



Speaker 2    00:01:11    This is brain inspired.  



Paul    00:01:25    Hey everyone, it’s Paul. Happy holidays. I hope you’re well. Today I speak with Irina Rish, who is currently at the Université de Montréal and also a faculty member at MILA-Quebec AI Institute. And I wanted to have Irina on for multiple reasons. One of which is her interesting history, uh, having been kind of on both sides of the AI and neuroscience coin. So she’s also worked, uh, at IBM as you’ll hear working on healthcare and also in neuroscience inspired AI. And we have a pretty wide ranging conversation about much of her, uh, previous and current work. So we talk about, uh, her work on alternatives to the backpropagation algorithm. And we talked about her ongoing work on continual learning, which is kind of a big topic in AI the...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:18:59</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 122 Kohitij Kar: Visual Intelligence]]>
                </title>
                <pubDate>Sun, 12 Dec 2021 22:44:37 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-122-kohitij-kar-visual-intelligence</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-122-kohitij-kar-visual-intelligence</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/12/art-122-01.jpg" alt="" class="wp-image-1612" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/12/Ko.2021.bw_.png" alt="" class="wp-image-1613" width="333" height="334" /></div>



<p>Ko and I discuss a range of topics around his work to understand our visual intelligence. Ko was a postdoc in <a href="https://braininspired.co/podcast/75/">James Dicarlo’s</a> lab, where he helped develop the convolutional neural network models that have become the standard for explaining core object recognition. He is starting his own lab at York University, where he will continue to expand and refine the models, adding important biological details and incorporating models for brain areas outside the ventral visual stream. He will also continue recording neural activity, and performing perturbation studies to better understand the networks involved in our visual cognition.</p>



<ul><li><a href="https://vital-kolab.org/">VISUAL INTELLIGENCE AND TECHNOLOGICAL ADVANCES LAB</a></li><li>Twitter: <a href="https://twitter.com/KohitijKar">@KohitijKar</a>.</li><li>Related papers<ul><li><a href="https://www.biorxiv.org/content/10.1101/354753v1.full.pdf">Evidence that recurrent circuits are critical to the ventral stream’s execution of core object recognition behavior.</a></li><li><a href="https://www.gwern.net/docs/ai/2019-bashivan.pdf">Neural population control via deep image synthesis</a>.</li></ul></li><li><a href="https://braininspired.co/podcast/75/">BI 075 Jim DiCarlo: Reverse Engineering Vision</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Ko    00:00:04    I kind of wake up every day to sort of think that maybe my research is going to help someone’s life. And I think this is kind of like, oh, well, what a great person you are. But like, I really, I mean, I think I’m going to do like a small story. Maybe this is please, you can cut it out. If it’s not relevant, let’s go to 5,000 BC trying to explain it. I’m trying time traveling back then trying to explain the motion adaptation model to them. They’ll be like, go away. Like, you know, what are you talking about? This is not, I don’t understand anything. So all these models are not real models of the brain. Like, I don’t know, how is the network failing? How do we know it is failing? And like what could be the additions that you can make to the models that improves it? I think to actually have a good quantitative, tangible grasp on those questions. I think you need a platform like brain score to actually be there. This is the model that tells you that what is going to be the predicted neural response for any given image. I think that’s what, where we are in terms of that. We think of this as a stronger test of the model, because there are many models than there can come up with different images. Then you can test those as well.  </p>



<p>Speaker 0    00:01:18    This is brain inspired.  </p>



<p>Paul    00:01:31    Hello, good people. I’m Paul, attempter of good, uh, personhood, master of none. Today. I bring you Kohitij Kar, who also goes by Ko, master of core visual object recognition. So Ko has been a post-doc for the past few years in Jim DiCarlo’s lab. And if you remember, I had Jim DiCarlo on back on episode 75, talking about the approach that his lab takes to figure out our ventral visual processing stream and how we recognize objects. And much of the work that Jim and I actually talked about was done in part by Ko. Now Ko is an...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Ko and I discuss a range of topics around his work to understand our visual intelligence. Ko was a postdoc in James Dicarlo’s lab, where he helped develop the convolutional neural network models that have become the standard for explaining core object recognition. He is starting his own lab at York University, where he will continue to expand and refine the models, adding important biological details and incorporating models for brain areas outside the ventral visual stream. He will also continue recording neural activity, and performing perturbation studies to better understand the networks involved in our visual cognition.



VISUAL INTELLIGENCE AND TECHNOLOGICAL ADVANCES LABTwitter: @KohitijKar.Related papersEvidence that recurrent circuits are critical to the ventral stream’s execution of core object recognition behavior.Neural population control via deep image synthesis.BI 075 Jim DiCarlo: Reverse Engineering Vision


Transcript

Ko    00:00:04    I kind of wake up every day to sort of think that maybe my research is going to help someone’s life. And I think this is kind of like, oh, well, what a great person you are. But like, I really, I mean, I think I’m going to do like a small story. Maybe this is please, you can cut it out. If it’s not relevant, let’s go to 5,000 BC trying to explain it. I’m trying time traveling back then trying to explain the motion adaptation model to them. They’ll be like, go away. Like, you know, what are you talking about? This is not, I don’t understand anything. So all these models are not real models of the brain. Like, I don’t know, how is the network failing? How do we know it is failing? And like what could be the additions that you can make to the models that improves it? I think to actually have a good quantitative, tangible grasp on those questions. I think you need a platform like brain score to actually be there. This is the model that tells you that what is going to be the predicted neural response for any given image. I think that’s what, where we are in terms of that. We think of this as a stronger test of the model, because there are many models than there can come up with different images. Then you can test those as well.  



Speaker 0    00:01:18    This is brain inspired.  



Paul    00:01:31    Hello, good people. I’m Paul, attempter of good, uh, personhood, master of none. Today. I bring you Kohitij Kar, who also goes by Ko, master of core visual object recognition. So Ko has been a post-doc for the past few years in Jim DiCarlo’s lab. And if you remember, I had Jim DiCarlo on back on episode 75, talking about the approach that his lab takes to figure out our ventral visual processing stream and how we recognize objects. And much of the work that Jim and I actually talked about was done in part by Ko. Now Ko is an...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 122 Kohitij Kar: Visual Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/12/art-122-01.jpg" alt="" class="wp-image-1612" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/12/Ko.2021.bw_.png" alt="" class="wp-image-1613" width="333" height="334" /></div>



<p>Ko and I discuss a range of topics around his work to understand our visual intelligence. Ko was a postdoc in <a href="https://braininspired.co/podcast/75/">James Dicarlo’s</a> lab, where he helped develop the convolutional neural network models that have become the standard for explaining core object recognition. He is starting his own lab at York University, where he will continue to expand and refine the models, adding important biological details and incorporating models for brain areas outside the ventral visual stream. He will also continue recording neural activity, and performing perturbation studies to better understand the networks involved in our visual cognition.</p>



<ul><li><a href="https://vital-kolab.org/">VISUAL INTELLIGENCE AND TECHNOLOGICAL ADVANCES LAB</a></li><li>Twitter: <a href="https://twitter.com/KohitijKar">@KohitijKar</a>.</li><li>Related papers<ul><li><a href="https://www.biorxiv.org/content/10.1101/354753v1.full.pdf">Evidence that recurrent circuits are critical to the ventral stream’s execution of core object recognition behavior.</a></li><li><a href="https://www.gwern.net/docs/ai/2019-bashivan.pdf">Neural population control via deep image synthesis</a>.</li></ul></li><li><a href="https://braininspired.co/podcast/75/">BI 075 Jim DiCarlo: Reverse Engineering Vision</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Ko    00:00:04    I kind of wake up every day to sort of think that maybe my research is going to help someone’s life. And I think this is kind of like, oh, well, what a great person you are. But like, I really, I mean, I think I’m going to do like a small story. Maybe this is please, you can cut it out. If it’s not relevant, let’s go to 5,000 BC trying to explain it. I’m trying time traveling back then trying to explain the motion adaptation model to them. They’ll be like, go away. Like, you know, what are you talking about? This is not, I don’t understand anything. So all these models are not real models of the brain. Like, I don’t know, how is the network failing? How do we know it is failing? And like what could be the additions that you can make to the models that improves it? I think to actually have a good quantitative, tangible grasp on those questions. I think you need a platform like brain score to actually be there. This is the model that tells you that what is going to be the predicted neural response for any given image. I think that’s what, where we are in terms of that. We think of this as a stronger test of the model, because there are many models than there can come up with different images. Then you can test those as well.  </p>



<p>Speaker 0    00:01:18    This is brain inspired.  </p>



<p>Paul    00:01:31    Hello, good people. I’m Paul, attempter of good, uh, personhood, master of none. Today. I bring you Kohitij Kar, who also goes by Ko, master of core visual object recognition. So Ko has been a post-doc for the past few years in Jim DiCarlo’s lab. And if you remember, I had Jim DiCarlo on back on episode 75, talking about the approach that his lab takes to figure out our ventral visual processing stream and how we recognize objects. And much of the work that Jim and I actually talked about was done in part by Ko. Now Ko is an assistant professor at York University, where he’ll be starting his lab this summer. His lab is called the visual intelligence and technological advances lab. And he’s part of a group of people who were hired into a fancy new visual neurophysiology center at York that is going to be led by none other than my previous post-doc advisor, Jeff Schall.  </p>



<p>Paul    00:02:29    So Ko and I kind of continue the conversation about using convolutional neural networks to study the ventral visual processing stream. And on this episode, we talk, uh, about that background a little bit, and also Ko’s ideas for where it’s going. So as you may know, what started out, uh, as a forward convolutional neural network has since been extended and expanded and Ko continues to extend and expand both of the models to account for object recognition and experimental work that will be used in conjunction with the models to help us understand visual object recognition. And that includes adding other brain areas and therefore models to more wholly encompass an explanation of our visual intelligence. So I get Ko’s thoughts on what’s happening, what will happen and how to think about visual intelligence and a lot more topics I linked to his lab and he is hiring as he says at the end. So if you’re interested in this kind of research, you should check it out. I link to it in the show notes at braininspired.co/podcast/122. Thank you as always to my Patreon supporters. If you decide you want to support the podcast for just a few bucks a month, you can check that out on the website at braininspired.co as well. All right. Enjoy Kohitij Kar.  </p>



<p>Paul    00:03:49    Uh, are you an electrical engineer? Are you a neuroscientist? What the heck are you?  </p>



<p>Ko    00:03:55    Um, yeah, I think I’m an electronics engineer, according to my undergraduate, um, education and training, uh, and then sort of a move slowly, gradually into like biomedical engineering, one step towards neuroscience maybe. And then finally did a PhD in neuroscience.  </p>



<p>Paul    00:04:12    What was it that got you interested in a neuroscience?  </p>



<p>Ko    00:04:16    I think like all of, a lot of us, I think those were discussions about consciousness and things like that, that I kind of cringe upon a little bit now, but those were the, those were the introduction to neuroscience. And I think I particularly got influenced by a lot of these very nice storytellers, like, uh, so I was doing my masters at New Jersey Institute of technology, but I was sort of, um, cross registering for courses at Rutgers where Jackie was, was a professor back then and just like listening to him and the way he talks about the brain. I think those kinds of those were sort of the initial hooks to like, oh, I really want to be in this field and be with these people and like talk about the brain with them, things like that, like sort of at a very artificial superficial level. Um, mostly, and I remember going to one talk, uh, from V S Ramachandran at Princeton and it was like, those kinds of things was like, wow, like this is such a, you know, uh, interesting system and I want to work on it. And, and I think that were the initial things that kind of like drew me towards studying the brain  </p>



<p>Paul    00:05:16    Storytellers, the  </p>



<p>Ko    00:05:18    Storytellers pretty much. And I think now I’m kind of like thinking that could be something beyond storytelling and like, uh, but, but the storytellers are perfectly fine scientists and they also do a lot of stuff that I kind of do now. So like, there’s nothing against storytelling, but I think that component that I sometimes kind of feel like, oh, what is the use of that? I think that’s really useful because like to tell a story about your science in a way that sort of attracts young minds, I think is great,  </p>



<p>Paul    00:05:46    But now, so, so consciousness and storytelling, uh, drew you in, but now you’ve discarded both of them as a frivolous.  </p>



<p>Ko    00:05:54    I don’t think I have discarded them as freewill as I, I just have, I think my time is spent better doing other things than that. I think, I don’t think those are like bad problems to work on or like you useless things. I think there’s actually very useful, but I kind of realized that that’s not my sort of, you know, a forte, like that’s not, my expertise is not doing  </p>



<p>Paul    00:06:18    What percentage of people do you think who, uh, you know, are, are drawn in, are drawn in because of like the big questions like that. And then, um, you know, I said discard or, uh, you know, whatever I said, but then go on to realize, you know, start asking very specific questions and, and kind of leave those larger things by the wayside. It’s a really high percentage, isn’t it?  </p>



<p>Ko    00:06:42    I think so. I think it’s a very high percentage, but, but I think also it probably is useful to kind of keep reminding ourselves what the big questions are and like, uh, so I think that simultaneously very important. And it’s just that the,  </p>



<p>Paul    00:06:56    Sorry. No, no, that’s fine. I was just going to say that I think part of the reason, and I don’t really know the whole reason, but I think part of the reason is that, um, those big questions get you in, and then you realize that there are a lot of big questions that are super interesting that aren’t those questions. I don’t know. Does that seem on point?  </p>



<p>Ko    00:07:13    I think that’s right. And I think there’s kind of like very similar to how I feel right now. And I think, I mean, I know that it’s sort of like saved multiple times, that it’s all about asking the right questions and like, the questions are very important, but what, at least from my perspective, I think I realized that like the answers and what do I consider satisfactory answers to those questions often determine like how you approach your science and things like that. So to me, like, it’s just not about the question is also like, what kind of answers am I satisfied with and why am I seeking that answer? I think those are the real drivers of what I actually do in the lab. Yeah. Um, yeah, of course I would like to like, you know, simulate, I dunno, uh, consciousness in an artificial system, but I think that that is going to be a very difficult, um, kind of objective to, you know, go for in a lab and get funding for it. I mean, I’m, I’m, I’m really happy that some people are trying to do that who are more privileged than probably I am. But  </p>



<p>Paul    00:08:11    Congratulations on the new job, I guess that’s not so new now, but where are you? Where are you sitting right now? You’re not at York yet. Are you?  </p>



<p>Ko    00:08:18    No, no. I’m S I’m still at, in Cambridge messages at MIT, my governance.  </p>



<p>Paul    00:08:24    So when are you headed to York?  </p>



<p>Ko    00:08:27    Yeah, I’m starting in July. Oh, nice.  </p>



<p>Paul    00:08:31    Well, congratulations.  </p>



<p>Ko    00:08:33    Thank you. Thank you. Yeah. Um, I’m very excited and it was a very interesting hire because all of this happened during the pandemic. Yeah. I’m still supposed to go and see the department to some degree. It’s, it’s really, it’s really a virtual remote, but I’m very happy so far with what I’ve all the discussions that I’ve had with colleagues there. And then I’m very to start  </p>



<p>Paul    00:08:55    Working. You’ll be, uh, you’ll be near my, um, my postdoc advisor, Jeff Shaw up there. So  </p>



<p>Ko    00:09:02    Yeah, I’m very much looking forward forward to working,  </p>



<p>Paul    00:09:05    Um, that I, you know, I’ve asked him a couple of times he’s been pretty busy cause he just moved to York as well. Tell him that I am still waiting for him to come on the podcast, so.  </p>



<p>Ko    00:09:14    Okay.  </p>



<p>Paul    00:09:15    So, um, so you, you, um, your, your most recent work, uh, I was a postdoc in Jim DeCarlo’s lab and Jim’s spot on the show and you guys are one of the reasons why I asked you about your engineering background is because you guys are quote unquote reverse engineering, the visual system, uh, I guess it all it off with a convolutional neural networks, uh, and the feed forward story of convolutional neural networks. Um, but, and I don’t know how you got into a deep learning, but I do know that you were discouraged at one point from, uh, from studying deep learning or using it. Can you tell that story?  </p>



<p>Ko    00:09:55    Sure. Yeah, so, I mean, it’s an old story. It’s like probably like now already 10, 11 years old. This was 2008 when I started my masters in biomedical engineering. And, um, I think I kinda realized talking to a lot of people back then as an even saying the word or saying something like, oh, I’m working with a computational model and I’m in a neuroscience program. It’s sort of like, you’re looked down upon as a fake neuroscientist. You’re not one of the real people that is doing the real neuroscience,  </p>



<p>Paul    00:10:26    Please. You’re not doing experiments  </p>



<p>Ko    00:10:27    Because I was, at that time, I was not doing experiments. And I was mostly trying to like, look at things like, for example, like, you know, I was doing the working on auto encoders or neural network models, trained with, backpropagation basically looking at how internals of these networks might match some neurophysiological data that I had or some behavioral data. It came to things that everybody including me is all excited about these days. But like,  </p>



<p>Paul    00:10:51    But that was before the quote-unquote deep-learning revolution in 2012. Right. So  </p>



<p>Ko    00:10:57    I think it was still popular back then among certain groups, I guess. But I, I just did not, I mean, I couldn’t have predicted that if I had worked on that, maybe there could have been some nice papers or nice, you know, uh, studies, uh, that I could have done. But as I was saying that, like, I kind of got a bit discouraged because like I just started realizing that, oh, this is not the real neuroscience, because I’m not sitting there with a slice of four mice brain patch, clamping. And like looking at neurons, voltage is going up and down on a stupid monitor or something. Like, I kind of feel like, you know, that’s, that’s the real deal. And I remember I, I prepared a poster for a conference and I was going to present this poster, which is work done with like these artificial neural networks.  </p>



<p>Ko    00:11:41    And I think I was so, you know, um, I was, I was afraid that I will be ridiculed at that conference in the morning of the day. I can just got out of there. Like, I’m not going to present this, forget about it. I’m going to go back and I’m going to do real neuroscience and look what I’m doing right now. So I have a, unfortunately it’s really ridiculous, but it’s kind of pathetic, but there’s a paper that I wrote back there with all these ideas of like, oh, back probably re reinforcement learning autoencoder student teacher network. And I it’s, it’s really badly written and it don’t don’t look at it, but like, I kind of use that as a joke with my friends, like, oh, only if I had, you know, pursued this, you know, like all this work from Dan and Jim like, oh, I was way before that. It was ridiculous. Uh, no, I don’t think he would take that paper is a joke. Yeah.  </p>



<p>Paul    00:12:32    Well, yeah. Well you were talking so real neuroscience. That’s interesting because what you described with the mouse, uh, brain slices and patch clamping is exactly how I cut my teeth in neuroscience because I was a real neuroscientist. Right. So you think the definition of what a real neuroscientist is has changed now, so that, uh, people, you know, doing what you do is, um, do you feel, uh, like a valid neuroscientists now?  </p>



<p>Ko    00:12:57    Yeah. Well, I kind of validated myself by doing monkey physiology and the partner patients. So whenever I’m doing that, whenever I’m leading that life, I feel like as a real neuroscientist, I mean, I still think that like, actually it helps to look at the brain and biological data to get the right perspective about the system. So I definitely value that, but I think with time, the importance of computational techniques and analysis techniques are so important now, just, I think as we were discussing, like there’s a, there’s an answer that you’re seeking. And the answer to me is it’s going to be in the form of that, uh, those models. And so like if you’re not talking that language, it’s sort of becomes difficult to communicate, it will become difficult to communicate any neuroscientific finding in the future. So I think in that regard, that might become the real talk of neuroscientists in a few years. If it hasn’t been,  </p>



<p>Paul    00:13:51    You’re looking for an answer, what’s the question.  </p>



<p>Ko    00:13:54    That’s a very good question. So I think, um, exactly. So the question that I think a lot of people are interested in is that how do we solve certain tasks? Like at least that’s the way how I look at, from leading questions. Like I’m interested in neuroscience because I’m interested in a behavior and why I’m interested in that behavior, particularly, maybe because if that behavior goes missing, I’ll be in deep trouble. So like, that’s kind of my sort of, um, way of getting into this space of like, okay, there’s a behavior then what does it mean to do a behavior? And how do you actually scientifically study? If so we measured this behavior, we operationalize that behavior with some task and we measure that. And then the understanding, or the question is that like, how does the brain solve that problem or, or give rise to that behavior. And then we start by building models of that behavior. And depending on what type of answers we’re looking for, are we looking at how different neurons come together and produce that behavior or how different brain areas are participating in that behavior? We, we try to like, you know, uh, build specific units or parts of that model and look at them carefully. So at least that’s how I formulate the question, but the bigger question is like, okay, there’s a big behavior and how are we actually solving it? You know?  </p>



<p>Paul    00:15:11    Well, so you can correct me if I’m wrong here, but the, yeah, the story is I see it, uh, from Jim’s lab and from the convolutional neural network work is that, you know, you’re trying to solve object, uh, core object recognition. Um, and you know, it started off with a feedforward neural network, uh, you know, that was built through many years. And then, um, you know, the deep learning world came on the scene and, uh, you guys realized that these networks accounted well for, uh, predicted the brain activity well, and kind of went on from there, but, uh, things have developed. So the reason why I asked what the question was, uh, is because, you know, it’s interesting, it’s almost like an isolated system, right? So you have this convolutional neural network and it is modeled, the layers are modeled after the ventral visual processing, hierarchical layers in the brain. Um, and you know, the goal is to understand a vision, right. And I don’t know what that means. Um, you, do you feel like you guys have a, where are we in, uh, understanding vision?  </p>



<p>Ko    00:16:18    Yeah. I think that there’s a lot of questions in, in that, in those sentences, because like, let me maybe like explain a little bit about what I think of what understanding means maybe like, so, um, I, I think one definition of understanding that I have in my head is that it is basically coming up with a falsifiable model of, of something. Like, if I understand something and you can tell me that it’s a wrong understanding, and if I can basically have a model that is falsifiable, which like I can make predictions and you can tell me that, oh, you’re wrong. So for example, and there could be different levels of this understanding. So I understand how my coffee machine works because I can predict which button to press and the coffee is going to come out. So it’s like a concrete prediction. You can test me like, oh, you can tell me, like, go turn on the coffee mission.  </p>



<p>Ko    00:17:04    I go, I press the wrong button music. You don’t have any understanding of how this machine works, but if I press the right button and the coffee starts coming out, but then if the machine breaks down, there’s a different level of understanding. I might need to fix it. So like, then I, then you might ask me like, which part of the machine to fix and how does it work? And there’s a more detailed level of understanding required. So in the same way, I feel like understanding vision would require multiple levels. And I think one of it is like at the behavioral level, like I predict the behavior. So that’s where we started. But all of this sort of relies on models like concrete, computational models, at least that’s what my current sort of, I was gonna say, understanding like my, my current sort of opinion of like, what understanding for me might mean is that like you have concrete, computational models that make explicit predictions about, you know, how, how our system is gonna work or, or perform.  </p>



<p>Ko    00:17:56    And then you, you get to test it and that’s sort of the understanding, and then that’s moving. Now the problem, I think usually is that if we define understanding this way, then we have to also sort of have common, I don’t know, goals of like, what are we trying to understand? Like, what is that behavior? What, and my current, uh, you know, view of the, of the field is that we actually don’t have common goals like that. We’re kind of like all doing our own things. And so I think it’s kind of important to maybe like have certain specific goals as like, this is what we are trying to predict. These are the, you know, behavior of the system. These are the neural data or something that we are trying to predict and then come together, like come up with what are the best models that can do that. And some of it is we are currently trying to do, do it with this, um, website and platform called brain score. Um, and, and trying to have integrative approach to like all kinds of data and all kinds of model and things like  </p>



<p>Paul    00:18:57    That. So how’s Brain-Score going? Are a lot of people using it?  </p>



<p>Ko    00:19:01    Yeah. I think the user base of Brain-Score is definitely increasing. And I think, uh, we are, we are having a conference now. Uh, well, we still submitted it at Cosyne and we are potentially going to have a competition. It’s like, I think it’s going to feel a little bit more like an ImageNet competition or something like that. But my, my personal opinion is that maybe someone can look at Brain-Score and say, it’s too early for some someone to like start making these models and, or scoring them and being so concrete about it. Right. But, um, but I think it has to be done. Like, that’s kind of my goal. And like it’s, if you ask me where vision, like understanding of vision is to me, like pointing to some kind of platform, like Brain-Score is a concrete answer that I can give, like, that’s my way of quantifying it.  </p>



<p>Paul    00:19:47    Yeah. So it’s a benchmark. And, but, you know, on the other hand, uh, benchmarks have gotten some flack because like you were talking about, uh, we don’t know whether that’s the right benchmark, right. Whether it’s the right question. Yeah. So it is concrete, but, um, I guess we’re progressing and asking better questions. W would you agree with that? Yeah.  </p>



<p>Ko    00:20:08    Yeah, absolutely. And I think there is no like three or four benchmarks that will define our understanding. So I think the goal is to have more and more benchmarks, and hopefully you, we will see that, like, because it’s the same brain that is giving rise to all that data. So if you are actually modeling that particular brain, then we should be converging to like a very small space of models, eventually, at least as the dream. Um, so of course, like there could be multiple different benchmarks and different ways people are probing the system. But I think the value add of brain score is that if we can get those, all those experimentalists and modelers on board, then they can provide those data or provide those sort of as also targets for current systems. Instead of saying that like, oh, you know, your network is never going to predict that like, okay, that’s okay. That’s fine. I mean, the networks are falsified under all possible benchmarks, so it’s not a big, you know, sentence to say, but it’s just like, how is the network failing? How do we know it is failing? And like what could be the additions that you can make to the models that improves it? I think to actually have a good quantitative, tangible grasp on those questions. I think you need a platform like brain score to actually be there  </p>



<p>Paul    00:21:21    And ask you about falsification, because you’ve talked about how that’s one of the useful parts of the modeling, uh, push is that they are falsifiable. Um, but then, you know, you have models like the feedforward convolutional neural network, uh, that predict what is it like 50% of the neural variance somewhere around there. Right. How does one falsify a model of  </p>



<p>Ko    00:21:42    Yes. So I, I don’t think, I mean, in, in the, in the sense of falsification, those models are all falsified anyways, but then the question is, how do you build the next best model? When I think of that, I feel like given some numbers like that, it then makes me kind of figure out if I build a better model, it should at least be better than the current numbers that are coming out of, you know, the feedforward neural networks or something like that. So I think of course you can, I mean, is the question that you dismiss the entire space of models, our family of models, uh, as like completely useless, or you say like, that’s a good start. Let’s build upon that and start adding elements to that, to build the next best model. I think I, I’m mostly motivated by this idea that yeah, like we have a good grasp and thanks to machine learning and AI for that for actually building these real models and not like time models.  </p>



<p>Ko    00:22:32    And so now that we have these models, let’s capitalize on this momentum and get, get going and build the next, probably build the next best models. Although I’m talking about models in this way, but I, my personal life is mostly spent doing experiments, trying to put holes on that, on those modeling frameworks or models. So I’m actually very happy that those are all falsified because I feel like that’s my job to falsify them. And, but the other part of my job that I feel like is important is that it’s not only just to falsify them, but also get some data that is in the same scale and in the same sort of spirit that would help build the next better, best model or something. So it’s just not good to shit on them. It’s just also provide some, you know, material for them to work on to on and become a little bit better. So  </p>



<p>Paul    00:23:19    In your, uh, so you’re doing a lot of experimentation, uh, what’s faster modeling or experiments,  </p>



<p>Ko    00:23:26    Experiments. Uh, yeah. Uh, I mean, I think building a better model is very, more difficult than doing an experiment. Uh, this is, yeah. I, I, I think I’ll debate anybody about that because I think for me, like, you know, Alex, so again, depends on which field you are, if you are building this model for, or purposes, or like, so there, of course modeling is way more faster than any behavioral experiment or any neural experiment. But if we are trying to build a model of the brain it’s it’s, uh, so it’s like, uh, we are discussing about this engineering things that, okay, I have a, I have a problem of like, how is the brain working, but the solutions cannot be anything it’s like constrained by this biological system. So there’s like a specific solution that we’re trying to look at. And I think aligning the models with that is a very, I mean, I can build a model that might solve action perception or action prediction better than the current system, but that might not align with the brain.  </p>



<p>Ko    00:24:21    I think when I said like the modeling of it’s slower, I think it’s that bit, which is like having models that are more aligned with brain because, you know, like 2012 AlexNet came up, uh, and then now we don’t even talk about AlexNet. And in terms of the computer vision, I think no serious computer vision scientists would say like, AlexNet is my model that I started with, but it came to neuroscience and it’s still here. We are still using AlexNet. I think things come to neuroscience and they stay there for a longer time, because it’s just very difficult to falsify or like discriminate among these models even. And I think there are maybe in, I mean, for us, there are some, some of them are like, there’s some deeper questions in here as well. Maybe because like, when we say we have a model of primate vision, like, like what do we actually mean?  </p>



<p>Ko    00:25:05    We have a model of a specific human or like a specific monkey, or are we modeling the shared variance across humans or monkey or, you know, are we developing a model of the, you know, all the possibilities, like a superset of vision? Like, so, so, so how well should a model of object recognition even predict behavior of, of one subject or some neurons that I’m recording for in a monkey brain? I think we need to think carefully about those, those questions, because like, yeah, sure. Like the model might predict, you know, one neuron in a monkey’s brain at 50% explained variance, but then how well does any other neuron in any other human brain predict another human’s IT neurons or something like, so I think quantifying and sort of setting up the ceilings based on what we actually are modeling — are we modeling individual human beings, or individual monkeys, or, you know, the shared monkey population.  </p>



<p>Ko    00:25:58    I think those questions are sort of important. And then maybe we are done with like predicting core object recognition, feed forward responses because, you know, one monkey predicts another monkey or 50%, and there’s no way you can improve beyond that. Something like, so it’s, to me, I think because of these kinds of, um, of course I’m sitting and I’m realizing that like, this is basically like, it’s, it’s empirically challenged in something like that. Like, so it’s, it’s actually the experiments that have to provide these answers and we’re sort of like limited by technology and how well we can probe the system. So that’s why I think slower.  </p>



<p>Paul    00:26:31    Yeah. Okay. Well you said ceilings, so, uh, the way you’ve talked about it, it makes me it’s a fuzzy ceiling, I suppose. They’re fuzzy ceilings and that respect. Yeah. All right. So your I’m slowly coming around to the fact that modeling takes a long time. I did, I didn’t do any deep learning modeling. I did like a kind of a psychological model. It was very simple, right. And it took a long time, but experiments, I had to go in every day and it just was, uh, you know, years to publish a single paper.  </p>



<p>Ko    00:27:02    I see where you’re coming from. And I, I totally, I mean, that’s has been sort of my experience as well. I mean, it takes a long time to train a monkey to implant the arrays and, and get the data and maybe, you know, the area doesn’t get implanted. Well, then you have to implant again. Like there are multiple problems that can come up. Um, but I just feel like at the end of the day you have some data and if you have designed your experiments properly, that’s like, especially neuroscience, which I think is still in the dark ages. Like it’s sort of like novel data, you know, it’s like, just it’s anything you do. You can, you can basically, it’s like novel data and a target for like a model to sort of predict. And I think in that way, it’s faster because they, I can build a model like one minute, just, you know, put some two convolutional layers together and call it a model. But is that really useful or is that really taking the field forward? I mean, I mean, I, maybe I answered it too fast about like, you know, experiments are slower.  </p>



<p>Ko    00:27:59    I might have to think about it, but I think that, but I think I’m trying to sort of like tell you a little bit about why I think modeling is actually going to be slower, especially like modeling  </p>



<p>Paul    00:28:07    There’s physical time, but then there’s also heartache time. So maybe those are two orthogonal things. Right. So like the other question would be like, where do you experience more heartache and, uh, obstacles. And do you think modeling would be the answer to that  </p>



<p>Ko    00:28:23    Again, depending on your experience? So like, if, if I, if I’m like running a monkey, like after I have like a, you know, brought a monkey to the lab and done an experiment, I have zero energy to do anything else in the day. So it’s like I’m done for the day. And, um, and I think that way, yes, it’s, it’s a lot more, I mean, at least because that’s the experience I had, I can’t tell how bad it is for like a modeling person or how banks like to come up with, you know, giant Mo I mean, I  </p>



<p>Paul    00:28:53    Feel like,  </p>



<p>Ko    00:28:54    Like most of these, like the libraries are not loading, the version is not correct. So those are the problems that I usually face. Uh, but, but yeah, but at the end of the day, once the model is training and I don’t know, I mean, at the end of the day, I feel a modeler is going to be more disappointed because the models don’t really predict much more than the previous model. That’s like the neuroscience experiments, if it’s designed properly to begin with, I think is always going to give more insight, just a biased opinion means.  </p>



<p>Paul    00:29:24    Yeah, we’re all, we’re all biased as we know. All right, co so, um, again, correct me if I’m wrong, but, uh, the way that I see it, there’s this core object recognition story that at the core of it, uh, is a feed forward convolutional neural network. And, um, you know, you guys in Jim’s lab, uh, have done a lot to explain neural data. So that’s kind of like the basis, the way that I see it. And then from there you’ve done a lot of other work. Like you’ve started adding bells and whistles like recurrence and you’ve controlled, you’ve synthesized images to, uh, predict, you know, which neuron is going to be driven by a particular image. So you, so you’re making the models more complicated. Um, and I’ve heard you argue that these, that what we need is more complicated models. Whereas, you know, from a, from an, uh, Phil, uh, philosophy of science classic perspective, what we like are simple models, right? And because part of the problem with these deep learning models is that we don’t exactly know how they’re doing what they’re doing and to use a complicated model, to explain a complicated organ, like the brain, uh, there’s a pushback on how much that actually buys us in terms of understanding. But you argue that no, we actually need them more complicated. Why is that?  </p>



<p>Ko    00:30:49    Yeah, I think it depends on how you define complication, because I think the reason why I might say that it’s, we need more complicated models because the models are not really predicting what we are. We set out to predict. So I think making them simpler. I mean, I don’t know. I mean, I don’t, I don’t think that’s going to be the answer because the brain is complicated. So anything that is a simulation of the brain will look complicated in some sense, in the other sense, it will not look complicated because if you have correspondences and alignments with the brain, you can point to a part of the model and say, oh, that’s before. And that you can say like that’s before in the brain. So that in that way it might be, become less complicated over the course of it. Just the definition of like what complication and like, what is interpretability and what is understanding?  </p>



<p>Ko    00:31:32    I think those, and because there is no objective definition of those things. I mean, I think these kind of conversations usually, you know, lead nowhere. I mean, I kind of think I’m trying to think of this thing. Like, for example, when I was in my, in my graduate, uh, studies in may do my PhD, we had models of motion after effect. And, uh, if I spoke to anyone like at VSS or SFN, or co-signed about these models, uh, everybody would say like, oh, this is completely understandable, interpretable, simple models that we have intuitions about, which is like, okay, you show coherent. Uh, so you still show a random motion pattern and you have this motion detectors, they’re all firing and they’re all fighting equally. There is no, like, basically if you, after that, if you show a stimulus that is moving upward, the upward neurons will do something.  </p>



<p>Ko    00:32:21    And it’s going to be like some response, which is going to be higher compared to the rest of the group. If you are only showing upward motion for a long time, those are the neurons that are going to fire and get fatigued. And then when you show like random pattern, you’ll see like everything else is firing higher and there. And the upward motion detectors are kind of firing slightly lower. So overall you will have bias towards saying, okay, it’s going. Maybe the motion is going downward, something like that. And this can be modeled and people have modeled this. And I think those models compared to artificial neural networks now, like they might be considered simpler, more intuitive, understandable models, less complicated. Now I’m thinking like, let’s go to 5,000 BC. People are talking Tamil or Sanskrit or Greek or some other language, trying to explain it.  </p>



<p>Ko    00:33:07    I’m trying time traveling back then trying to explain the motion adaptation model to them. They’ll be like, go away. Like, you know, what are you talking about? This is not, I don’t understand anything. So I’ll, these models are not real models of the brain. Like, I don’t know. And I think, I feel like the same thing is happening now, which is like artificial neural nets. But, but, but, but remember like the motion model that I, that I just mentioned was predicting this, this adaptation phenomenon, this behavior. So that was kind of the, the goal of this modeling effort. It had some relevance with how people have looked at the brain and neurons. And so, but if I tell this in 5,000 BC and people will be like, I don’t know, this is not mapping into our worldview. And I think the same thing might happen right now with convolutional neural networks and systems neuroscientists and things like that.  </p>



<p>Ko    00:33:50    It’s like, okay, this is too complicated. This man, I don’t, I cannot like fit into my low dimensional kind of behavioral space of like how this high dimensional, you know, areas are functioning or like responding. So I don’t take that complaint seriously because I think with more familiarity with these terms and models, that that complaint is just going to go away, as the models become more and more powerful in predicting different behaviors. And we will see, for example, use of having these models in sort of like, you know, real world applications. And I think that kind of fear of, oh, this is a too complicated of a system is just going to go away. And for those, for whom like this won’t go away, they’ll just probably have to live with it.  </p>



<p>Paul    00:34:32    Okay.  </p>



<p>Ko    00:34:34    But maybe the more, I think one of the reasons why I feel people like simpler models is it allows them to like maybe think through like, if the model gets stuck, what to do to improve it. And I think that to me is like real value of having a simpler, more interpretable model. And there is a question of efficiency. If you can have a complicated model kind of self-correct yourself, yourself improve itself, but it’s kind of a future goal. Maybe that might just be a more efficient way of dealing with this problem. Then, then kind of like humans kind of coming up with their own intuitions of like, what is a better model and things like, and I think we were discussing what is engineering background to me that might be something I’m more prone to accepting because of my engineering background, because I just feel like there’s a question there’s a solution and these are just tools to get to the solution. It doesn’t matter if I intuitively kind of understand it or not, as long as it’s aligned with the brain data and things like that is fine.  </p>



<p>Paul    00:35:35    So one of the, I actually got even more excited to talk to you because after we had set up, uh, this episode, uh, someone in my course asked, um, because I talk in my course, I talk a lot about, I use, uh, Jim’s work and your work to talk about convolutional neural networks and how, you know, how it relates to the ventral visual stream. And then someone in the course asked, what about the dorsal stream? Because I talk about the two visual streams and, uh, this goes back to the question of like what it means to understand vision. And I know that one of the things that you’re doing, uh, so the question was like, why aren’t there models for the dorsal stream as well? Why is it all ventral stream? And I know that you are starting to incorporate it and you have some background with the dorsal stream as well. Um, and maybe we should talk about what the dorsal stream is just, uh, to bring everyone up to speed, but w uh, what are you, so are you just starting to incorporate other brain areas now? W what is your, yeah,  </p>



<p>Ko    00:36:29    Well, the first thing is that maybe if that student is interested in doing a PhD or a post-doc send them my way, because that’s, that’s the kind of question I was also asking about, like, what is the dorsal stream doing? Because I had spent like five, six years studying the dorsal stream, which is a sister system of the ventral stream in, in sort of anatomical location in the brain plus  </p>



<p>Paul    00:36:51    Say what the dorsal stream is like, what it classically is. Uh, do you want to say it, are you happy to, as well? You can say so. Yeah. So classically, there are two ventral, uh, two visual streams, um, it hits V1, and then it kind of branches off into a ventral stream, which is, uh, what the massive amounts of neuro AI and core object recognition is about where, uh, it gets processed over hierarchical areas, uh, through V2 before it, until, uh, we suddenly have neurons that respond to whole objects, but the dorsal stream is classically the where, or how stream, um, which is much more related to, uh, the motion and spatial, uh, aspects and, uh, our actions. Right. So, um, it’s activity related to, and that’s where I spent my career is basically from more or less in the dorsal stream. Yeah. So I don’t know. Did I explain that? Okay,  </p>



<p>Ko    00:37:49    Absolutely. I I’m usually now, like, I think I’m usually very careful about assigning some behavioral function to areas. I mean, mostly start talking about anatomical locations and like, who knows, like you might find that the dorsal stream is just a big part of core object recognition. Right?  </p>



<p>Paul    00:38:06    Well, well, yeah, I mean, so the thing that has been, um, I guess always known, but not paid so much attention to, is that there’s a lot of crosstalk between the dorsal and the ventral stream, but we’ve kind of studied them in isolation right. As to, uh, individual separate things.  </p>



<p>Ko    00:38:24    Yeah. I mean, I think that’s a sort of, I see that as an opportunity, uh, to sort of like really take this sort of studies forward and trying to incorporate, um, looking at dorsal stream as well. Just one point I wanted to make is that I think there are folks who are beginning to build models of the dorsal stream in the same way as you know, the ventral stream modeling has gone. I think I recently saw a paper from Chris Becks group and sorry if I’m forgetting other authors. I think Blake Richards was part of it. Patrick was like, there, there there’s, there’s a, I think it’s a bioRxiv preprint at least there’s work done from Brian Tripp’s group tried to model the system. Like, of course, like dorsal stream has a lot of modeling, you know, prior modeling work that is not kind of similar to the convolution neural network, uh, stuff, but, but I think people are beginning to build them and, and their different objectives that they’re proposing as like sort of normative framework for like how the dorsal stream gets sort of trained up. Like, I think it was a nice hypothesis and we’ll see like, whether the data actually supports those models or stuff like that. But I think for me sort of trying to get into this area, those are really nice work because like that gives me some baseline ideas or baseline models to start testing and proving, you know, when I start designing my experiments, I think those models will really help me to sort of make a good experimental design.  </p>



<p>Paul    00:39:48    Uh, but are you building, so I actually don’t know like what kind of a model, because it wouldn’t be just the same, would you wouldn’t just use a convolutional neural network to model the dorsal stream. Right. Um, and so are you building models yourself also, or are you going to incorporate,  </p>



<p>Ko    00:40:04    I have not personally built any models right now. I’ve just been testing some of the models like, so I started testing some of the models that were mostly used for action perception or action recognition models. They have these like temporal filters, like they’re still convolutional. It’s just like more dimensions to the convolution is like a time dimension. So I think those are like good starting points because like they’re easy to build maybe because they can use the same kind of like training procedure. But I think we have to, at some point become a little bit, be okay with being a little bit, you know, uh, go lower in terms of prediction because we need to move from static kind of domain to a dynamic domain. And I think my usual experience has been that whenever you go, you make this jump, like all these models start to kind of like not perform as well.  </p>



<p>Ko    00:40:51    Not, not predict the neuro responses as well. And so I think to me, like that might be one of the reasons why maybe some people are building these models and they’re not really coming out because they don’t really predict next. So maybe backing up a little bit, like my main, um, sort of interest in this, um, dorsal-ventral interaction question kind of started when I was mostly recording, you know, showing static images to the, to the monkeys and recording their responses in IT. And I, and these are, you know, uh, objects that are either like natural photographs or, you know, some kind of synthesized, uh, images. And I started thinking about my previous work in dorsal stream and it was like about motion and, you know, like there are dots moving and gratings moving, but if I think about the real world, like I never see dots moving or gratings moving in the real world, like there’s objects moving.  </p>



<p>Ko    00:41:39    And to be like, if I, my, my, if I have to have any real world relevance of my current research, I just felt like, you know, it’s a dynamic world, I’m moving my eyes and I’m moving myself and w the objects are moving. And if I think of these questions are typical, these behaviors, dorsal stream kind of pops up in any literature search that I do. It’s like self motion, you know, motion of objects or motion of like, not objects, but like maybe motion of like some something in my visual field. But, but then I was wondering like, you know, like it has this nice representation of what the object is and if the object starts to move, is it all, is, does all of it fall apart? Like what happens? And so just out of curiosity, I just started recording from these neurons and when the objects were actually moving, and then I started kind of, you know, this is, this work has not been published, but it’s like the sort of the, uh, preliminary result is that, well, it kind of can predict where the object is headed, uh, where it is moving.  </p>



<p>Ko    00:42:38    It’s not, we know from previous studies from Jim’s lab that from looking at IT representations, you can tell where an object is. This was from Hong and Dan 2016, where the object is located. You can tell in a static image. So there’s one trivial solution where, okay, like, if you can tell where the object is located at different time bins, you can maybe combine that information to tell where the object is heading. It is going, what I started finding is that, like, it’s not only that it’s like, you can just take a snapshot of like, like maybe after you have started this movie 200 millisecond, 10 milliseconds later, you can just look at a small time bin, and you can tell where the object has been going. So it’s like, there’s a predictive signal of where the objects are headed. So that’s sort of like, then I started thinking like, maybe this is coming from the dorsal stream, or is it, you know, like, but, but again, these are, again, like ways of thinking that I’ve kind of discarded in the last few years.  </p>



<p>Ko    00:43:30    So I feel like the way to think about this is like, can a vanilla ventral stream model explain these neural responses already, and the dorsal stream not be involved at all. Maybe they will fail. And then the dorsal stream models are actually necessary to account for these neural responses. And also the behaviors that I can test based on these kinds of stimuli. So that has sort of been my approach on the dorsal side, and the quick update on the ventral stream results is that these models, they, they’re not really predictive of these kinds of responses at all to some degree. That gives me hope. Yeah.  </p>



<p>Paul    00:44:03    It’s always good to have hope when you’re starting your career. Although not that you’re starting your career, but your new, your new start. Yeah, exactly. But you said you’ve discarded, um, thinking about it, uh, in that way from like, uh, different brain areas. Is that because, uh, you’ve discarded thinking about different assigning roles to individual brain areas or,  </p>



<p>Ko    00:44:23    Yeah, sure. Absolutely. I think, I mean, that’s that whole way of thinking is like, I think it’s primitive. It’s not going to lead to like, the brain is doing what it is to like, make us go through the day. And like all areas are coming together in some form or the other. And so I think it’s, I will never, I don’t want to come up with the answer that like the dorsal stream is doing blah, blah. Like, I think it’s just part of a system that is trying to solve a behavior. And the answer is going to be here is a model that has elements in it that are corresponding to neurons in the dorsal stream. And together they, you know, solve a behavior. Now you can ask the question if I really want to satisfy someone, like, what is the dorsal stream doing? You can start doing perturbation experiments in the model or in the brain and see what happens to the behavior.  </p>



<p>Ko    00:45:11    If I take out, you know, part of the dorsal stream or part of this and that. And, but then I’m mostly worried, like, what is the answer? Like what my answer is going to be like, oh, it takes a 10% hit for video A versus video C. Like, I feel like those are the kinds of answers that are really going to come out, but people are going to, I mean, I might spin this off as like, oh, but this is about, you know, function X or like, it’s about something about predictive coding, or I can give the answer in that form. But I think at the end of the day, it’s just going to be a big lookup table of like, you perturb this part of the dorsal stream, you get X hit on this particular behavior, this particular video. So I, that’s why I feel like my answers need to be in the, in the, you know, in the modeling kind of framework. Yeah.  </p>



<p>Paul    00:45:58    Words, w we’re we’re limited by our language. Uh, it turns out this very special thing that we have language also is very limiting in some respects, I suppose.  </p>



<p>Ko    00:46:07    But I think if the models can relate back to the language, I think then some of the, you know, uh, problem or the tension might be relieved a little bit, because I think now there, so for example, I mean, this is maybe slightly off topic from the dorsal ventral discussion, but like, if you look at a model of ventral stream, like you can look at Brain-Score and they say, okay, ResNet-101 or something has some numbers associated, like some scores. I can see why people have a problem with that model. And why people say this is not interpretable because like, there are parts of the model that are just don’t know what it is like, how does it map to the brain? Like I can call it some part of it, or some part is thousand different things in between that I have no clue of what they are.  </p>



<p>Ko    00:46:48    And like, maybe the model is not performing because of those, you know, computations that are happening in those layers. How do I relate this back to the brain or something? So I feel like that is a real problem. And I think it is in our interest to start coming up with commitments to different parts of the model and then falsifying them based on those commitments, if it’s like interpretable models should, to me, should be like, if I write up, so what is the interpretable thing in neuroscience? Like a paper, like the abstract of the paper is completely, should be at least interpretable to anybody. So if a model has components to it that can talk to each part of the abstract, like, you know, you have a task, you have a neuron, you say something. And if you can basically map your abstract to parts of the, of the model, and if the model can map onto the parts of the abstract, clearly that I think just gives the model interpretability. And I think that level of crosstalk and language I think should exist. And I think that language, um, you know, trying to sort of develop myself to, even when I’m thinking about modeling and experiments,  </p>



<p>Paul    00:47:52    Well, I mean, after all that, about how we shouldn’t assign roles to individual brain areas, uh, you are doing some inactivation, uh, experiments, right? So what’s going on, what’s going on in there? Why are you inactivating individual brain areas?  </p>



<p>Ko    00:48:07    Yeah. So I think that that’s basically try to maybe, so there are a couple of studies that I think at least I’ve done recently, one has been already published, which is like inactivating ventrolateral PFC, and looking at core object recognition behavior, and also looking at representations in IT when the monkey was doing that task. And, uh, the goal was to basically expand or test whether these feedback loops that are existing between these areas, are they actually playing a role in that specific behavior that, that we are studying because the current models are incomplete and they’re not predicting enough. So like, it’s kind of make sense that maybe there are other areas and there are other connections that are important. So that is not to say like, PFC does X, right. It’s like,  </p>



<p>Paul    00:48:54    It does everything apparently. Yeah.  </p>



<p>Ko    00:48:57    I’m sure it does a lot of things. It’s just like for, for me to actually ground this problem, it was more like, what kind of role, or what kind of signals do I, at least from the inactivation? Cause like what kind of signals go missing in IT when I inactivate PFC and what kind of, sort of, uh, deficits do I see in behavior? And then the data, again, as I was saying is, is not like all, you cannot, um, identify objects in an occluded scene or something. It’s not an answer like that. It’s mostly like, here’s a big data set. It’s like, it’s not satisfactory to many people. Like here are the giant data set. It’s clearly like you see there’s an average effect, PFC, no PFC, okay. I’ve shown you this. There is a prediction that is coming out of a model that is like, this model is a feed forward model.  </p>



<p>Ko    00:49:48    It might not be doing XYZ. And while I like that, those are the images where these effects are also much more concentrated on. So there’s a story. There is like, okay, it’s clearly part of a system that is not the feed forward system that is maybe going beyond the current feed forward system. But like at the end of the day, I think the next step is to build a model that has a unit or module that is called, you know, vlPFC, and perturbing that should, should produce the same kind of deficits it’s like, and this is where I think it’s a very hard thing to do. It’s actually easier thing for me to like perturb PFC and like get this data and say like, okay, this area is involved, but then build this model. I think that’s going to be really difficult.  </p>



<p>Ko    00:50:31    And I think, and there are limitations to perturbation data, for example, I think, um, and this is, might be like relevant to the conversation about perturbation experiments, because I think even, even after this perturbation experiment, I think actually recording in that specific area with the same kind of task and same kind of stimuli might be more constraining for the next generation of models. And that is exactly what I’m doing currently. But at the same time I was thinking like, what kind of perturbation experiments might be like, you know, may have more, uh, benefit for the kind of models that we have right now. And that kind of led me to, um, developing mostly we say developing a lot, but it’s like basically testing, um, this sort of chemogenetic strategies where you inject, um, a virus in a brain area. And for, in my case, I also implanted a Utah array on top of it so we injected DREADDs in V4. There was supposed to be like, you know, silencing or, you know, down-regulating the activity in V4, and then we implant a Utah array.  </p>



<p>Paul    00:51:29    Sorry, can you say what dreads are? Because we haven’t even, I don’t think we’ve even, I think we’ve mentioned them on the podcast before, but what are dreads? And then I also want to ask you, so you, uh, injected and then you in a separate surgery, then you implanted  </p>



<p>Ko    00:51:43    No, it was done on the same. Uh, it was done on the same exact surgery.  </p>



<p>Paul    00:51:48    Okay. Sorry, sorry to interrupt you. Yeah,  </p>



<p>Ko    00:51:51    No, no problem. So, so the key, so, so, so the basic idea is that you inject a virus that, that ends up, uh, sort of manifesting as a receptor in a neuron that you can activate or deactivate with various means. It’s the same idea with optogenetics, the same idea with chemo, genetics in the optogenetics to, to sort of activate or, you know, that particular receptor you need to show shine light on, on, on that neuron, right? Uh, on that area for, for chemo, genetic, you need to basically inject a drug into, into the system. And, uh, so there are some pros and cons of these two different, multiple different things. For example, you’re kind of like limited in terms of where you might want to inject for up till because you know, light delivery is tricky because you have to be mainly maybe, you know, restricted to the surface of the, of the brain.  </p>



<p>Ko    00:52:49    Deeper structures might be very difficult to target at scale. Maybe you can target like one or two neurons in, in chemo, genetic, you can basically inject the virus, the virus anywhere you want in the brain. And it kind of gets activated through the sort of like injection that you do in the bloodstream. So it basically activates or tries to activate all these receptors that has been, uh, you know, um, produced. But then there’s like temporal limitations. So after can like go very fast, quick on off, but the key, the dreads are more like museum organisms as they it’s on the effect is on for some time. And how long are in my calculus? No, no, I don’t know the weaker I would be, that would not be so good. I think it’s most so from my, from my estimates, I think it’s mostly on for like maybe a couple of hours and sort of, there is like very similar, at least the main times like museum walls.  </p>



<p>Ko    00:53:44    So, um, and what, what I have been doing is like, we have these areas that you can actually test, you can show the same images over and over again, after you have injected the activator drug, and you can sort of see how quickly, or what is the kind of, um, time course of neurons responding lower or higher. And then you can have behavior on top of it. Like the monkey is also behaving on different blocks. So you can kind of see like, you know, there are some deficits that are coming up and then the deficit sort of like go away at the end of the, at the end of the day or something. So I think I’m at least thinking of like, how do I take this? And like, make it useful for models. Like, okay, I can say like a V4 is involved in, you know, object recognition.  </p>



<p>Ko    00:54:28    That’s, I don’t know, not too many people will be interested to listen, but, but if, if, if you give me like, okay, brain score has like thousand models that all have like 0.5 correlation for V4 activity, but now I give you some V4 inactivation data and then 900 of them fall off and they cannot really predict the kind of, you know, pattern of deficits that V4 has that might be as, you know, important than, than to learn maybe the important problem. But, but as you see, like here, you need to have a model that has like a brain tissue mapping of V4. And, you know, where are you injecting the virus in the model versus in the actual brain? So, I mean, there are parts of this problem that are still more complicated, but I think this, the chemogenetic strategy, at least for areas like V4, you know, where you’re injecting and these are mostly retinotopic areas.  </p>



<p>Ko    00:55:22    So there’s some level of, um, you know, uh, correspondence in the models. And then you have a neural data on that. So you can actually just say like, you know, like I don’t care about like your assumptions just like fit to the neural data. You have the, for neural data within, with our activation, you have, you know, your model within, with our activation just fit to all the data that you have got and then predict what happens to it or predict what happens to behavior in the model. And that’s how you validate the model. I think that is a very, I think that’s a stronger form of using sort of this perturbation experiments, because I think it’s not uncommon to see, you know, experiments where someone says, you know, this area I perturbed did nothing happened. And someone said like, no, no, no, you didn’t do this, blah, blah, blah.  </p>



<p>Ko    00:56:05    So it’s like, if the answer’s always yes and no, I think it will just stay there. It has to be sort of falsification of like competing models. And then maybe some data will be more useful than the others. The other, I think upshot of having something like this is that imagine, have monkeys that are doing these tasks in their home cages. Like we have a lot of monkeys that are trained up and they do these tasks all day in their home cages whenever they want, because they have a tablet, they can do these tasks. You can pair this up with, with that system and you just need one person to just go and inject like something in a monkey. And then basically you have days where you can like, you know, run this with an, with, with some part of their brain Concordia activated and you can multiplex, even with the viruses, you can like, you know, target inhibitory neurons, et cetera. In neurons, you can have different viruses inject in different parts of the rate that have their own corresponding activator drugs. So that the, I think there’s a lot of kind of interesting data sets that can come out of this approach, which should bear on, on the modeling questions.  </p>



<p>Paul    00:57:10    How much of your, uh, future, what I want to know is like the vision that you have for your own lab and how much of it is going to be this kind of work and how much of it is going to be modeling and so on.  </p>



<p>Ko    00:57:24    I think a lot of this is going to be this kind of work and like just pushing the boundaries of experimental neuroscience. I think the modeling is like, it’s, it’s like that’s going to be the backbone of the lab. Like the computational part is like, no answers can be provided from the lab if there is no model attached to it. So I will be collaborating with others. I’ll have people, you know, working with people in the lab who will be building probably these models as well and testing them out. But I think that, I don’t think I will be happy at the end of my career if I did not improve like, like a model or something of the system, even after doing all these different experiments. So it’s going to be a mix of that. I mean, maybe, I don’t know. I mean, I should probably mention this, like, I’m not, honestly, I’m not really interested in building the best model for corrupted recognition or dynamic visual perception or visual cognition, uh, just for the sake of building that model and understanding how the brain works.  </p>



<p>Ko    00:58:21    I mean, I don’t quite motivate myself that way, I think. And it kind of, I mean kind of interesting because like, I think for training purposes, these were the most concrete fields and most concrete labs that I thought, okay, this is where I should get trained, but I think I kind of wake up everyday to sort of think that maybe my research is going to help someone’s life. And I think this is kind of like, oh wow, what a great person you are. But like, I really, I mean, I think I’m going to like a small story. Maybe this is please, you can cut it out. If it’s not relevant. I was, I was. So I’ve been working in visual neuroscience and people know that I work in visual neuroscience back home in India. And  </p>



<p>Paul    00:59:00    What do you mean? People know like the, like India knows,  </p>



<p>Ko    00:59:03    Like my family, my family, my family, sorry, people, 1 billion people. No, no like five people, 1 billion people.  </p>



<p>Paul    00:59:11    Oh, that’s more than, that’s more than what I do. So there you go.  </p>



<p>Ko    00:59:17    Yeah. So among those five people, maybe like 10 Indian families tend to, so, and one of those 50 people, then there are some of them that I don’t, I think they have some idea of like what I might be doing, which is completely wrong. And I think I had this encounter with someone, uh, and, uh, unfortunately their kid, um, had, had got diagnosed, um, to be in the autism spectrum. Uh, and so I was meeting them and th they, they asked me like, also, what are you working on these days? And I’m like, okay, I’m working on visual cognition of saying stuff like, how do we reason and things like that. And, um, this, this person turns to the kid and, and tell them that, you know, your elder brother will one day, like, you know, it’s working towards the solution. And this kid is like, very young can understand anything of what they’re saying, but they’re basically telling them that he is going to come up with a solution that will cure you.  </p>



<p>Ko    01:00:16    Right. And it just felt like I just was feeling like I was thinking like, I’m failing enough, do that failing. I cannot find any connection to like, you know, what this translates to. And, and that really, I mean, that was kind of a pivotal, like, like a point where I started thinking, like, I need to find real connections with what I’m doing and how that really impacts or translates, not just this, you know, like the first paragraph of a grant saying like, you know, I’m working in dyslexia. Like if this is relevant to like, blah, blah, really trying to schizophrenia, et cetera. So like, it’s really trying to find them that I started actually. I mean, that’s going to be at least some part of my, my, my future research is like trying to find out how, you know, having these models that are these concrete models with brain maps, how are they beneficial to diagnosis and potentially treatment strategies insert some of these neurological disorders. And I started working a little bit towards these goals, and I’m very excited about this because I think there are real benefits. And I think you were mentioning about this, like neural control studies. I think those are the kinds of studies that are really, um, sort of giving me hope that like, there is a way to like, contribute to this, to this  </p>



<p>Paul    01:01:30    That’s kind of a magical thing. So, so that, wasn’t your motivator for a long part of your career, but, uh, from a place of guilt it’s, but, but it’s developed into guilt is a great motivator. Uh, but it’s developed into like a real motivation for you, but I never had that. I, I, I don’t care about helping people. And so I always felt bad writing schizophrenia in a grant, for example. Right.  </p>



<p>Ko    01:01:59    Yeah. I mean, it’s a little bit philosophical. Like I don’t even know. I cared about helping people in somebody. Maybe I’m basically thinking I’m trying to help people, but I’m just trying to help myself maybe thinking like, well, what if I have Alzheimer’s or something in my old age, but like, yeah. But I think currently at least I do feel like that gives me some level of satisfaction to think there is potentially some link of my research that might be getting help to some book for someone. Yeah.  </p>



<p>Paul    01:02:29    I mean, it is interesting to think how through our work, um, through your work, through people’s work, uh, your interests change and, uh, as you develop and as you ask different questions and answer different questions, it’s just kind of a magical thing. So that’s thanks for telling that story.  </p>



<p>Ko    01:02:48    Yeah. I think that that definitely like impacted me a lot. So, but, but I, I’m also, like, I think these are related issues. Like, I think, like you were asking about understanding and progress and like things that like understanding vision and visual cognition. I think the moment we start to like, measure our understanding, like in the brain score where something like, then I think this answers to like the clinical translation becomes more concrete, maybe like, like, so I think they’re very related. It’s just, for me, it took me a little bit to like figure out and maybe I’m still working on it, like to figure out where exactly are the most relevant parts of it. And I think my interaction with a lot of folks who are doing autism research, like really helped, for example, I’ve been in touch with, uh, Ralph Adolphs at, at Caltech and we’re sort of collaborating on a project. I think those like those discussions and like reading the papers, like really, I think, I think they have a lot to contribute to what I do. And I think our way of thinking about the system has a lot to contribute to that research. Interesting.  </p>



<p>Paul    01:03:55    So, uh, w I, you mentioned the, uh, image synthesis work a little bit. Um, can we talk briefly about that because maybe you can just describe what the work is. I talked with Jim about this when he was on the podcast, but, uh, we can kind of recap because it was kind of splashy. Right. Um, and I, I kind of want to hear your thoughts on how you currently think about that work as well.  </p>



<p>Ko    01:04:18    Yeah. So this work was, was done in collaboration with Pouya Bashivan, um, who’s at McGill now, and then, so me, Pouya and Jim were basically, we did the study together. And, um, so the basic idea was that, um, we, we were recording in V4 and we have models of V4 neurons. And the question was that, can you, from the model, you know, come up with stimuli using the model, can you come up with stimuli that puts the neuron in specific desired states. And, and one of the states that we considered was like, let’s make it fire the most we can. So the model will tell me,  </p>



<p>Paul    01:04:53    Yeah. So, so this is the control aspect of understanding.  </p>



<p>Ko    01:04:57    So that, that, that is like, you know, prediction and control. And this is the control part. So the models could predict, but maybe they couldn’t control because maybe the images that were synthesized. I mean, there’s a part where there’s a separate technique, which is the, how are you synthesizing the images? And maybe there are ways in which that doesn’t need to be attached to the model that, that specific model that you’re using to predict that they can be two separate things. But, but again, like for us, it feels like, you know, we were using the same model to come up with the images as well. So we came up with the images, we were trying to control the neuron, and we said, we were targeting like, okay, V4, let’s make these neuron fire as high as possible. That was one of the goals. The other goal was let’s take a bunch of V4 neurons that kind of share the same receptive field, um, properties and try to set one of them to very high and the others to be very low.  </p>



<p>Ko    01:05:47    This is like a population level, you know, control. So these were the two goals that at least we thought let’s start here. And then we were asking like, okay, this question seems like, you know, you’ve heard of this before because like, oh, what do V4 neurons do? Like, they, they respond to curvatures. What do V1 neurons do? They’re like Gabor’s and orientation and V2 is like texture. And IT is like faces, like now you come up with the stimuli and you look at them and like, I don’t know what to call them. Like, maybe they’re something, but then for us, we kind of ignored that problem. We just said like, okay, let’s just take these images and like, see whether the model’s prediction is right, because then that piece, you should show that like using these models, you can control the neurons to some, some degree. And that, that, that was basically the study. We have some success and we were comparing our, you know, success rates with like taking a random sample of natural images or using the previous sort of thoughts on what are the stimulus space that excites these neurons, like curvatures from,  </p>



<p>Paul    01:06:46    I want to hammer this home because the, the images that, uh, drove the neurons were, and you mentioned this, but I just want to reiterate that they were terribly unnatural. Right. They’re not, not something that you would see. Well, I mean, there are elements that you would see in nature. Right. But the majority of them weren’t, they just something that  </p>



<p>Ko    01:07:06    I don’t know what even called. I mean, there’s some pixel, you know, conglomerations. Like I, I, so there, there, there are two studies that came out on the same day and I think the other images are even more scary. So this one from Carlos Ponce and Margaret Livingstone and Gabriel Kreiman and Will Xiao, so there, they were trying to control IT, or they’re trying to like, you know, come up with the images for IT. Those images look even more scarier, but like, did they have, because they have some kind of natural relevance, they look like out of a horror movie or something, but like the V4 images were more like texturey kind of images. And we were also restricting ourselves to like, you know, black and white images and things. So I think that that was part of the, it was constrained in certain ways that led to those images.  </p>



<p>Ko    01:07:49    But as you were saying that, yeah, I did get a lot of attention and then, but, but I think some folks have gotten excited about the wrong thing from the paper and the resulting images that drove before, I think cannot be the protagonist of the story, because I think that kind of became the story because like we like to say like faces excited, it neurons are XYZ, excited, XYZ areas. And I think in that formulation, then it about the images as sort of the, our new understanding of the system. Whereas that was not about the images. It was about look how, what you can do with this model, because this is the model that tells you that what is going to be the predicted neural response for any given image. So I think that’s what, where we are in, in terms of like, we think of this as a stronger test of the model, because there are many models than that can come up with different images, then you can test those as well.  </p>



<p>Ko    01:08:43    And I think there’s work very, very interesting work from Niko Kriegeskorte’s lab about controversial stimuli. I think those are the right kinds of approaches. At least to me, like you pit these neural networks against each other and then synthesize stimuli and then test them, it’s a different kind of control experiment. But at the end, it’s basically about model separation and finding the best, the best model. It’s not about looking at those images and making kind of stories, stories about them. Yeah. The other side of the story though, is that this should not make someone feel like, oh, you know, this solves core object recognition. This is the model. Yeah. So yeah, I mean, that, that, that’s the other thing I feel like, you know, there’s ways of presenting data that can pull our point. It’s a proof of concept study to me still. It’s like, you know, look like if you take this approach versus the other approach, this approach, like our approach is better or something like that’s kind of the way to present the study. But that doesn’t mean that our approach is like the best approach or like we are done. So  </p>



<p>Paul    01:09:46    Do you have people suggesting that we’re done, uh, do that?  </p>



<p>Ko    01:09:50    I don’t think we have people who explicitly suggest that we are done, but they might use this as an example of like, look how great the CNS are. And I think it depends on whom you’re talking to, because I can also use the same example to kind of like talk to somebody who’s just basically saying, oh, CNNs have adversarial images. And this is like a completely wrong domain of like models. I can then use this example to say, look, you can do some useful stuff, but if I’m coming up with things like, you know, you need recurrence and you need other areas to incorporate, someone might go like, but you can control reasonably Relic. Why do you need to incorporate all of that? So if you really look into the models, you know, look at the generalization of the models, it’s not that good. It’s, it’s like, again, not that is a very arbitrary, like word usage. Yeah.  </p>



<p>Paul    01:10:40    Yeah. But you feel like, um, in some sense, you’re your own worst critic, right? Because you, uh, you see all of the nuts and bolts and you see what’s missing and what needs to happen. And so do you feel like people are too complimentary are too impressed with the current work because I, you know, you should be well, yeah, I shouldn’t be,  </p>



<p>Ko    01:11:03    I think they shouldn’t be, but I think they shouldn’t also like everything else. They should just, I mean, I actually think this is our responsibility. I mean, to sort of also expose where the, I mean, if you read the two papers together, like the neural control paper and the reference paper, they’re basically one paper is sort of highlighting how you can use them. The other papers sort of highlighting, like here are the images that humans and monkeys are good and the models are failing, so these are the ways to improve it. So I think if you take all of these studies together, then you might get a more balanced perspective. And I think my goal, at least, I mean sometimes for a lot of reasons, I mean, you know, better that like you need to sell the studies in a certain way, but I think in these kind of discussions are like in papers in the discussion sections, like we, we should always be highlighting sort of the confounds or the potential, you know, places to improve these models. I mean, even for core object recognition, these models failed in very trivial ways that are, maybe some people are just reading this paper might be like, oh, this probably already solved.  </p>



<p>Ko    01:12:06    Maybe they don’t exist. Maybe this is the thing that I’ve created in my head.  </p>



<p>Paul    01:12:09    More guilt, more. Uh, yeah,  </p>



<p>Ko    01:12:12    Absolutely.  </p>



<p>Paul    01:12:14    Um, I know that one of the things that you’re interested in is, uh, visual reasoning. Right. And, uh, I don’t know if you want to explain why you’re interested in it and what it is, but, um, one of the ongoing criticisms, so, so, uh, non-human primates is kind of like the gold standard, right in neurophysiology. And you need an N of two, you need two monkeys to publish, um, classically. But, uh, and, um, recently there have been, you know, a lot of people working more and more in rodents and mice, and of course there’s always been the disconnect between mouse, brain and human brain. And one of the reasons why people like to study non-human primates is because it’s like the closest thing that we can study, uh, that resembles human brains. Uh, do you see, um, limits to studying non-human primates, uh, to, you know, get at our intelligence? And so the reason why I asked you about the visual reasoning is because you’re starting to ask, so object recognition is a fairly simple thing, right? I know it’s not simple, but you know, we recognize objects, but now you’re starting to ask a more cognitive higher cognitive quote-unquote questions. And I’m wondering if you see limits to using non-human primates for that.  </p>



<p>Ko    01:13:30    Yeah. I think the answer will be sort of, I mean, my answer to that question would be maybe based on the kind of data that I will be collecting in some sense. So the way I see this problem is that like, you know, ultimately at least for myself, I’m not suggesting that everybody has this approach, but I’m pretty human centric in my worldview. And I think my goal is to find out like how humans solve a particular problem. So they are basically like the main model that I’m interested in. Um, so I think we start from human behavior on different tasks. And ideally we’ll have a model which is like currently maybe, you know, some form of convolutional neural network, which has many areas other than ventral stream, like dorsal stream, PFC. And they will be kind of like predicting parts of the behavior of the humans and maybe at full capacity or something.  </p>



<p>Ko    01:14:19    And I think at least one angle of approaching the monkey research would be like, can I get some neural data that might be constraining for those models might improve those models are. And usually, I mean, the way people go about it is that they collect some neural data, come up with an inference that is more, can be summarized as like a very smaller kind of principle, like have recurrence or like, like a smaller model. And then they incorporate that idea into the bigger model and ask like, do they improve my, my model, my bigger model? Uh, we, I can do that. I mean, I’m probably gonna do a bit of that basically, like saying, look like it looks like this other areas in the monkey brain is associated with this particular behavior and maybe that is going to improve my, my, my, my, my, my development of the models.  </p>



<p>Ko    01:15:08    The other thing could be like, you just directly, you know, feed the data that you’re collecting into the model building itself. So you’re getting a lot of monkey data. Then it’s a matter of like questions of like how much data is enough data. And I think we are getting more and more data. So I think this is the right time, like start putting them in the models. Like, so right now I’m involved in a project where all the data that I’ve collected is getting kind of filtered into the training part of the model and the models have been regularized with that data essentially. So like, and those models are becoming better predictors of core object recognition. So that is one way of bringing in the monkey neural data and the monkey behavior, maybe to this problem. The other way I think about this is that maybe, you know, uh, humans and monkeys share a very, I mean, it’s, maybe it’s probably proven in many ways that we share a very similar visual system.  </p>



<p>Ko    01:16:01    So if even if I just get responses of the visual neurons in it or other areas during showing some of these movies or some of these like, you know, videos on which the task is based off, I can be providing constraining data for the model of like, you know, you need to be in this representational space and then solve a problem. So, like it’s a, two-part kind of approach where the, the, the neural data is basically constraining the representational space of the model. And then on top of that, you add a decoding layer that is the reading those representation, and you can have multiple ways of decoding the task. And then you ask like which one, you know, or you can then compare it to human behavior. And I think this, this could sound novel or surprising, but like, this is exactly the thing that Jim’s lab like our lab has been doing for core of the recognition for quite a while, where we were recording in monkey brain, but then comparing the decoding models output to human behavior.  </p>



<p>Ko    01:16:57    I have now started working, like, because I was also getting the behavioral data too from monkeys. I have started now working, looking at trial by trial and like image by image behavioral correspondences with monkey neurons and, you know, human sorry, monkey behavior, but it was basically monkey neuron human behavior. We had a paper with Rishi Rajalingham looking at huge monkey neural responses to like words and non-words and their correspondence to human behavior on those sort of orthographic processing tasks. So I think there’s a way to like, do this kind of separated from a behavioral task is I think maybe if you’re asking, do we, does the monkey need to do the behavior for them to be relevant to this task? And I think the same applies to rodents and other species it’s just to me that the correspond, ultimately, again, as I was saying continuously to no discussions that at the end, there is a model and whatever you do, you need to kind of show that that adds to improvement of the model on something.  </p>



<p>Ko    01:17:55    And now I, from my, just what we’re talking about, I can say like, maybe my goal is like not to improve like prediction on human behavior to ceiling, but maybe it’s like, if I’m doing maybe predicting behavior of neuro-typical subjects versus, you know, people with autism, do I have some traction on that problem? Maybe like I can do, you know, like individually, et cetera, imbalances, I can create them more easily with chemogenetic perturbation in a monkey, and then test what those representational spaces are. And those could be like kind of constraining ideas for when you’re building models of people with autism. So I think there are many, many ways in which, and I’m, I’m, I’m seeing all of these ideas and with the risk of sounding like a scatterbrained person who has to, but I think at the, at the end of the day, I think these are the things that excited me. So I think I won’t be able to solve it all by myself. I am hoping that a lot of people who are kind of maybe similar minded, we all come together and kind of pry and tackle this  </p>



<p>Paul    01:18:55    So Ko, neuro AI. Uh, so, you know, um, a lot of your, at least, you know, most recent career has been using deep learning models to shed light on brains on, so this is the arrow from AI to neuroscience. Um, do you see, and part of what you’re doing also is using, uh, brain architecture and neuroscience, some details to improve the models bit by bit like you were discussing. Do you see neuroscience helping AI, uh, or does, does AI not need neuroscience can AI just scale up and go to AGI or what  </p>



<p>Ko    01:19:34    That’s a interesting question. And also, I think I’m probably not the, my answer might not be that satisfactory just because of my lack of knowledge in a lot of these domains. But I think, I think of this problem in different ways. So like, if I think of this as like, okay, I’m going to build a calculator and should I constrain myself with the brain data? No, it’s going to be like terrible calculator for scientific computing or something. So like, if that’s the goal of an intelligence systems like to compute, you know, calculate things fast and like, then I think constraining it, neuroscientific ideas and data as like a bad idea. Um, now if maybe we can make a distinction of like behavioral data and actual neural data. So I, if, if I want to prioritize in my head, like which data might be more informative to building models in for AI, I think behavioral data will come first before neural data. Some of the examples might be like moral machines, kind of data. That is part of the MIT media lab. I think if we are trying to constraint a system to work like humans, the human behavioral data, I think will be key to constraint. This  </p>



<p>Paul    01:20:47    That’s kind of been the success of deep learning, right. Is because it, um, the old way in neuroscience was to build a model out of kind of intuition, uh, and then compare it to data and the new deep learning approaches to build a model and train it, to optimize it for a task, uh, like an animal or organism, uh, would perform. And so it’s all about behavior and lo and behold, the, uh, model predicts neural data. Well also, right? Yeah.  </p>



<p>Ko    01:21:12    Yeah, definitely. But I mean, I was maybe making a slight distinction between like overall performance in a behavior versus like following the pattern of human behavior and the error pattern. So like ImageNet-trained models are trying to get the labels. Correct. Which is a behavior, but like humans might not always get those labels. Correct. And like the might have different patterns. So I think I was mostly thinking like this error pattern of like, what kind of decision do we make given some kind of confusing stimuli or things like that, those kinds of data might be more relevant to models if they want to sort of operate in a human regime, because I’m thinking of like a system that might be like, you know, helping somebody go through life, who are unable to do things in their life that that machine or robot has to interact with with the person.  </p>



<p>Ko    01:22:00    And then it’s, I think might be important for that, that, that system to be constrained with human behavior to some degree. Um, for those purposes, I think behavioral data is very valuable. At least that’s how I think about it. Um, for example, also AI in healthcare might be something that is, might be very constrained and there, I think maybe the neural data might have some bearing on that. I mean, it still has to be shown, I think, I mean, yeah, but I feel like there might be some, I mean, as I was saying that these ideas of like, you know, how does the brain, uh, differ in a neuro-typical subject versus atypical subject? That kind like, it just depends on the scale of the data and how we are getting it that, and that that’s the relationship of the brain, uh, representation to behavior.  </p>



<p>Ko    01:22:49    I think those kinds of data might help us to build better models of the atypical systems and then use solutions that might be catered to the atypical system. I mean, now I’m kind of, you know, being very abstract with me. I mean, I can come up with like a dream sort of example, where if you know exactly how like a system is learning, for example, a new task, and, um, you can do that for both atypical and neuro-typical populations. You might be able to use the atypical model to kind of come up with learning sequence that produces new typical behavior, even though it’s atypical system. So I think that kind of, that is definitely within, I think the, the genre of like AI healthcare kind of like approaches. So I think that way neuro to AI links probably are more clear to me. I think generative models, um, might have a, you know, a boost if they’re regularized with neurosystem data. That, that, that is another maybe, uh, angle, but yeah, but, but it’s it’s so I, I would just not, um, what I’m mostly worried is that it’s not like it doesn’t obvious that you have some brain inspiration or like neural data is going to improve AI models. Right. That’s, that’s what I’m kind of very pushing back against. It’s like, maybe you can get behavioral data and that’s enough and you don’t need to poke around.  </p>



<p>Paul    01:24:13    Isn’t it interesting that, you know, these deep learning neural networks are based on 70, 80 year old neuroscience, like fundamentally the idea of a neural network back with even the logical units. I mean, uh, so, and you’re adding more biological constraints to your models. So it’s an interesting,  </p>



<p>Ko    01:24:32    That’s true. I mean, I’m, I’m thinking of that. Like, so the first part, I agree that that’s like, you know, that that’s where all of these ideas might’ve come up and that’s a good reason to keep, you know, looking at neuroscience for, you know, inspiration for building better models. But if I look at the last 10 years, I really don’t see a concrete example of like, you read a paper in nature, neuroscience journal neuroscience, and took that idea and implemented in a model that dropout being  </p>



<p>Paul    01:24:57    Run by  </p>



<p>Ko    01:24:59    The end, they’re like engineering hacks. Like, I mean, yeah, the groups like to use it as PR, which is, I think the reason why, so it’s great for that purpose, but I think in reality at the end, you can have it. I mean, I mean, and that’s, that’s fine to me. Like, even if you have an idea of a dropout and then you figure out how to really like tweak it to make it part of a model that does something that’s great. And I think in that way, it’s really good to have neuroscience as an inspirational kind of umbrella on top of everything. Good, good for my career and I’ll be able to talk to them. But I think, I definitely think there is, there is purpose of, of neuro, I mean, yeah, that would be use of neuroscience for AI, but we need to be careful to not oversell it.  </p>



<p>Ko    01:25:40    Maybe, maybe we should. I know, but I think it’s the other way around for me makes more, to me it’s more valuable, especially because I think, you know, you’re trying to measure data in the brain that is noisy. This is like sample limited and then build theories and models around that. Like what to expect, like how to think about high dimensional spaces, blah blah. So like to me, like once you have a model that is doing a very, you know, uh, high level behavior and very accurately, that complex system gives us the opportunity to like really figure out how to even analyze a complex system. Like, so it’s, to me that’s a huge bonus from these networks, because you were saying this, I think have been trying to do both things at the same time, like, like build a complex system and then figure out how to analyze the complex system. And here are networks that are already built up and you can formulate like different theories based. I think to me, that’s like a huge advantage of having these networks and they stay, they really become like the starting points and the hypothesis, maybe base hypothesis for a lot of these neuroscientific experiments. So that’s kind of like, at least how I have been mostly getting excited about the, the cross-talk between the two fields.  </p>



<p>Paul    01:26:54    We talked about how there’s this kind of archaic, uh, fallacy, I suppose, for, you know, naming a brain region, giving it a role. Right. And, uh, the modularity of the brain prefrontal cortex does X that, that sort of thing. Um, and we’ve talked about, well, I guess I mentioned about, um, how language actually limits us in some sense, do you feel like we understand what intelligence is? Do we have the right notion of what intelligence even is to, uh, start trying to, you know, to continue trying to build quote unquote, uh, AI?  </p>



<p>Ko    01:27:31    I, I don’t re I mean, I know which we scientists, we are thinking of, like, I think for me, I probably don’t have a complete understanding of what intelligence is, but I have a friend of understanding of what kind of intelligent behavior I would like to build models for. And so that’s where I’m just the kind of the engineering engineering, maybe like talking, because I know what problem I have defined. I won’t know the solution. So like this kind of tasks that are slightly above, you know, recognizing an object and like trying to figure out like what different agents are doing in a, in an environment, or like trying to predict what might happen next. Like these kinds of behaviors I think are fairly intelligent behaviors. And my goal is to build models and, and try to figure out how the brain is actually trying to solve that problem.  </p>



<p>Ko    01:28:18    So in that way, I’m fairly happy about the definitions of intelligence, but then again, we’ll get into trouble. Like I’ll get in trouble and saying, what is intelligence? They be like the, you know, the typical, like it, you know, scores or IQ, it IQ scores, I think they’re heavily debated. And so I just feel like, what I want to say is that we can keep debating about what is the right score, what is the right way of quantifying intelligence, but we have to do it in some way, if we want to have any measurable progress. So I have defined it in some way and I will keep, you know, improving the definition and, you know, expanding on the definition. But, but I think, uh, intelligent behaviors are, to me that controversial, anything that I can do that my three year old son cannot do almost seems like a definition of like little bit more intelligent, but he might be learning faster than me. So at this stage, like the kind of definitions like that, maybe that exists, but like, yeah,  </p>



<p>Paul    01:29:16    You have a three-year-old  </p>



<p>Ko    01:29:18    I do have a two year,  </p>



<p>Paul    01:29:19    Two year old. Is that the only child?  </p>



<p>Ko    01:29:21    Yeah. Yeah. He’s our,  </p>



<p>Paul    01:29:23    Oh man. That’s, uh, that’s kind of a hard, um, patch going through and starting a new job and all that. So I feel sorry for you. I mean, it’s a wonderful thing obviously, but you know, it’s challenging early on, so  </p>



<p>Ko    01:29:36    Yeah. Yeah. It’s uh, yeah, yeah.  </p>



<p>Paul    01:29:40     are you, um, go ahead, go ahead.  </p>



<p>Ko    01:29:45    Um, I, I must say like, it’s, I’m happier, uh, on average, um, like in con like after taking into consideration everything around the child, I think overall I’m happier that we have a son that’s the most, I will say Tiniest, like a P P equal to 0.04.  </p>



<p>Paul    01:30:07    I used to draw this, uh, a pie chart where a, that I would show people like, you know, why do you like having kids? And it’s like 51%. Yes. 49. No. Yeah. All right. Maybe I’ll cut this because I sound like a real jerk. Um, are you, uh, are you hiring in the lab? Are you looking for, uh, students? What’s what’s the situation.  </p>



<p>Ko    01:30:30    Yeah. Yeah. I’m definitely looking for post-docs and grad students to work together, uh, in my lab. So I think if folks are interested, I mean, the grad students are, they’re basically going to be, um, recruited through York’s, um, graduate program. Um, and the post-doctoral candidates. I think I’m just going to talk to them individually and then see where the sort of, you know, alignments lie in. Yeah, definitely. If, if folks are interested in, in whatever we spoke about, and maybe if they read some of the papers and things, they’re interesting directions that they might want to pursue. I’m definitely interested in talking.  </p>



<p>Paul    01:31:09    He’s the future of neuro AI flux. It’s a, this has been a lot of fun co um, congratulations again on the job. And, uh, gosh, I’m just excited for you. It sounds like you, you have a lot to pursue and, um, things are, uh, looking up. Not, not that they were ever looking down, but, um, congrats.  </p>



<p>Ko    01:31:30    Thanks, Paul. I mean, I mean, there has been a lot of promises made. I feel like I’m, I’m kind of like making a lot of promise and I hope I am able to deliver. I feel like as long as I can quantify what those promises are, I can tell you in maybe a year where I have been, how much I have, you know, delivered.  </p>



<p>Paul    01:31:47    So check in in a year,  </p>



<p>Ko    01:31:50    We should check in. But yeah, I’m excited. I think, I think this is worth doing so. So I feel like I’m, I’m all excited to get on with it.  </p>



<p>Paul    01:31:59    That’s been great, Ko. Thank you.  </p>



<p>Speaker 0    01:32:00    Thank you so much.  </p>



<p>Paul    01:32:07    </p>

</div></div>


<p>0:00 – Intro<br />3:49 – Background<br />13:51 – Where are we in understanding vision?<br />19:46 – Benchmarks<br />21:21 – Falsifying models<br />23:19 – Modeling vs. experiment speed<br />29:26 – Simple vs complex models<br />35:34 – Dorsal visual stream and deep learning<br />44:10 – Modularity and brain area roles<br />50:58 – Chemogenetic perturbation, DREADDs<br />57:10 – Future lab vision, clinical applications<br />1:03:55 – Controlling visual neurons via image synthesis<br />1:12:14 – Is it enough to study nonhuman animals?<br />1:18:55 – Neuro/AI intersection<br />1:26:54 – What is intelligence?</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/34704c1e-d6e9-4447-bab2-9f4a31f750f7-122-Kohitij-Kar-public.mp3" length="89867309"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Ko and I discuss a range of topics around his work to understand our visual intelligence. Ko was a postdoc in James Dicarlo’s lab, where he helped develop the convolutional neural network models that have become the standard for explaining core object recognition. He is starting his own lab at York University, where he will continue to expand and refine the models, adding important biological details and incorporating models for brain areas outside the ventral visual stream. He will also continue recording neural activity, and performing perturbation studies to better understand the networks involved in our visual cognition.



VISUAL INTELLIGENCE AND TECHNOLOGICAL ADVANCES LABTwitter: @KohitijKar.Related papersEvidence that recurrent circuits are critical to the ventral stream’s execution of core object recognition behavior.Neural population control via deep image synthesis.BI 075 Jim DiCarlo: Reverse Engineering Vision


Transcript

Ko    00:00:04    I kind of wake up every day to sort of think that maybe my research is going to help someone’s life. And I think this is kind of like, oh, well, what a great person you are. But like, I really, I mean, I think I’m going to do like a small story. Maybe this is please, you can cut it out. If it’s not relevant, let’s go to 5,000 BC trying to explain it. I’m trying time traveling back then trying to explain the motion adaptation model to them. They’ll be like, go away. Like, you know, what are you talking about? This is not, I don’t understand anything. So all these models are not real models of the brain. Like, I don’t know, how is the network failing? How do we know it is failing? And like what could be the additions that you can make to the models that improves it? I think to actually have a good quantitative, tangible grasp on those questions. I think you need a platform like brain score to actually be there. This is the model that tells you that what is going to be the predicted neural response for any given image. I think that’s what, where we are in terms of that. We think of this as a stronger test of the model, because there are many models than there can come up with different images. Then you can test those as well.  



Speaker 0    00:01:18    This is brain inspired.  



Paul    00:01:31    Hello, good people on Paul attempt her of good, uh, personhood master of none. Today. I bring you Kohitij Kar, who also goes by Ko, master of core visual object recognition. So Ko has been a post-doc for the past few years in Jim DiCarlo’s lab. And if you remember, I had Jim DiCarlo on back on episode 75, talking about the approach that his lab takes to figure out our ventral visual processing stream and how we recognize objects. And much of the work that Jim and I actually talked about was done in part by Ko. Now Ko is an...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:33:18</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 121 Mac Shine: Systems Neurobiology]]>
                </title>
                <pubDate>Thu, 02 Dec 2021 17:24:16 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-121-mac-shine-systems-neurobiology</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-121-mac-shine-systems-neurobiology</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/12/art-121-01.jpg" alt="" class="wp-image-1607" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/12/profile-999x1024-1.jpg" alt="" class="wp-image-1608" width="250" height="256" /></div>



<p>Mac and I discuss his systems level approach to understanding brains, and his theoretical work suggesting important roles for the thalamus, basal ganglia, and cerebellum, shifting the dynamical landscape of brain function within varying behavioral contexts. We also discuss his recent interest in the ascending arousal system and neuromodulators. Mac thinks the neocortex has been the sole focus of too much neuroscience research, and that the subcortical brain regions and circuits have a much larger role underlying our intelligence.</p>



<ul><li><a href="https://shine-lab.org/">Shine Lab</a></li><li>Twitter: <a href="https://twitter.com/jmacshine">@jmacshine</a></li><li>Related papers<ul><li><a href="https://shine-lab.org/wp-content/uploads/2021/09/2020_progneuro.pdf">The thalamus integrates the macrosystems of the brain to facilitate complex, adaptive brain network dynamics</a>.</li><li><a href="https://shine-lab.org/wp-content/uploads/2021/09/2021_natureneuro.pdf">Computational models link cellular mechanisms of neuromodulation to large-scale neural dynamics</a>.</li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Mac    00:00:04    I was like, oh my God, around the corner in this place that everyone likes to forget. And if you tell most neuroscientists about it, that aren’t studying it, they’ll yawn and pretend like they’ve got some other problem to solve, but come on, man. The cerebellum is just beautiful in a way what the thalamus is doing is really controlling the state and then any influence that happens to it or the time, whether it be a cortico influence, a basal ganglia influence, cerebella a curricular influence, a neuromodulatory influence is going to shape and change the way that the state will change over time, which is one of the most crucial factors for determining how we do what we do. These things often strike you when you least expect them. But I think they’re an underappreciated aspect of, of science or at least the pilot sites that I really love of that kind of wallowing in your uncertainty until it resolves itself, I think is one of my favorite pots.  </p>



<p>Speaker 0    00:01:00    This is brain inspired.  </p>



<p>Paul    00:01:13    Hey everyone, it’s Paul. On this episode, I bring you an appreciation for the detailed nitty gritty work being done in systems neurobiology that highlights its importance in understanding the big picture functioning of our brains. Mac shine runs the shine lab at university of Sydney in Australia, focused largely on how systems neurobiology can help us understand our cognition. We talk about a pretty wide range of topics, all of which dance around systems neurobiology, which is on the whole, what Mac focuses on, but that is a vast range of topics. One of the main things we discuss is the role of sub-cortical brain areas that don’t get nearly as much attention as the neocortex gets, especially in the neuro AI world, where AI tries to glean some inspiration from brains, but work-like max theoretical work that we discuss hopefully will change that cortico centric bias. Many of us have.  </p>



<p>Paul    00:02:10    The main thing we discuss is the role of the thalamus mediating communication among the basal ganglia, the cer...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Mac and I discuss his systems level approach to understanding brains, and his theoretical work suggesting important roles for the thalamus, basal ganglia, and cerebellum, shifting the dynamical landscape of brain function within varying behavioral contexts. We also discuss his recent interest in the ascending arousal system and neuromodulators. Mac thinks the neocortex has been the sole focus of too much neuroscience research, and that the subcortical brain regions and circuits have a much larger role underlying our intelligence.



Shine LabTwitter: @jmacshineRelated papersThe thalamus integrates the macrosystems of the brain to facilitate complex, adaptive brain network dynamics.Computational models link cellular mechanisms of neuromodulation to large-scale neural dynamics.


Transcript

Mac    00:00:04    I was like, oh my God, around the corner in this place that everyone likes to forget. And if you tell most neuroscientists about it, that aren’t studying it, they’ll yawn and pretend like they’ve got some other problem to solve, but come on, man. The cerebellum is just beautiful in a way what the thalamus is doing is really controlling the state and then any influence that happens to it or the time, whether it be a cortico influence, a basal ganglia influence, cerebella a curricular influence, a neuromodulatory influence is going to shape and change the way that the state will change over time, which is one of the most crucial factors for determining how we do what we do. These things often strike you when you least expect them. But I think they’re an underappreciated aspect of, of science or at least the pilot sites that I really love of that kind of wallowing in your uncertainty until it resolves itself, I think is one of my favorite pots.  



Speaker 0    00:01:00    This is brain inspired.  



Paul    00:01:13    Hey everyone, it’s Paul. On this episode, I bring you an appreciation for the detailed nitty gritty work being done in systems neurobiology that highlights its importance in understanding the big picture functioning of our brains. Mac shine runs the shine lab at university of Sydney in Australia, focused largely on how systems neurobiology can help us understand our cognition. We talk about a pretty wide range of topics, all of which dance around systems neurobiology, which is on the whole, what Mac focuses on, but that is a vast range of topics. One of the main things we discuss is the role of sub-cortical brain areas that don’t get nearly as much attention as the neocortex gets, especially in the neuro AI world, where AI tries to glean some inspiration from brains, but work-like max theoretical work that we discuss hopefully will change that cortico centric bias. Many of us have.  



Paul    00:02:10    The main thing we discuss is the role of the thalamus mediating communication among the basal ganglia, the cer...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 121 Mac Shine: Systems Neurobiology]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/12/art-121-01.jpg" alt="" class="wp-image-1607" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/12/profile-999x1024-1.jpg" alt="" class="wp-image-1608" width="250" height="256" /></div>



<p>Mac and I discuss his systems level approach to understanding brains, and his theoretical work suggesting important roles for the thalamus, basal ganglia, and cerebellum, shifting the dynamical landscape of brain function within varying behavioral contexts. We also discuss his recent interest in the ascending arousal system and neuromodulators. Mac thinks the neocortex has been the sole focus of too much neuroscience research, and that the subcortical brain regions and circuits have a much larger role underlying our intelligence.</p>



<ul><li><a href="https://shine-lab.org/">Shine Lab</a></li><li>Twitter: <a href="https://twitter.com/jmacshine">@jmacshine</a></li><li>Related papers<ul><li><a href="https://shine-lab.org/wp-content/uploads/2021/09/2020_progneuro.pdf">The thalamus integrates the macrosystems of the brain to facilitate complex, adaptive brain network dynamics</a>.</li><li><a href="https://shine-lab.org/wp-content/uploads/2021/09/2021_natureneuro.pdf">Computational models link cellular mechanisms of neuromodulation to large-scale neural dynamics</a>.</li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Mac    00:00:04    I was like, oh my God, around the corner in this place that everyone likes to forget. And if you tell most neuroscientists about it, that aren’t studying it, they’ll yawn and pretend like they’ve got some other problem to solve, but come on, man. The cerebellum is just beautiful in a way what the thalamus is doing is really controlling the state and then any influence that happens to it or the time, whether it be a cortico influence, a basal ganglia influence, cerebella a curricular influence, a neuromodulatory influence is going to shape and change the way that the state will change over time, which is one of the most crucial factors for determining how we do what we do. These things often strike you when you least expect them. But I think they’re an underappreciated aspect of, of science or at least the pilot sites that I really love of that kind of wallowing in your uncertainty until it resolves itself, I think is one of my favorite pots.  </p>



<p>Speaker 0    00:01:00    This is brain inspired.  </p>



<p>Paul    00:01:13    Hey everyone, it’s Paul. On this episode, I bring you an appreciation for the detailed nitty gritty work being done in systems neurobiology that highlights its importance in understanding the big picture functioning of our brains. Mac shine runs the shine lab at university of Sydney in Australia, focused largely on how systems neurobiology can help us understand our cognition. We talk about a pretty wide range of topics, all of which dance around systems neurobiology, which is on the whole, what Mac focuses on, but that is a vast range of topics. One of the main things we discuss is the role of sub-cortical brain areas that don’t get nearly as much attention as the neocortex gets, especially in the neuro AI world, where AI tries to glean some inspiration from brains, but work-like max theoretical work that we discuss hopefully will change that cortico centric bias. Many of us have.  </p>



<p>Paul    00:02:10    The main thing we discuss is the role of the thalamus mediating communication among the basal ganglia, the cerebellum and the cortex. And the idea is that through these interactions, the thalamus serves to nudge the brain into different dynamical states of operation, based on the ongoing demands of the organism and the context of the environment. And Mac has laid out theoretical roles for the basal ganglia and the cerebellum that don’t necessarily align with the way we traditionally think of them, nor does the thalamus story align with its traditional story. But Mac is interested in the system as a whole and also studies the ascending arousal system and the neuromodulators it deploys to affect the state of our cognition. So we discussed that and how he believes it’s helpful to think of all this complexity from a low-dimensional dynamical systems perspective. So this is a heavy systems neurobiology episode, but well worth your time.  </p>



<p>Paul    00:03:07    And I think you’ll find it worth revisiting. If you’re interested in forming a zoomed out broad picture of how complex systems like brains work on that note, I want to add that Mac, and for that matter, plenty of other guests I’ve had on the podcast, to me is an example for all of us, whether you’re an aspiring student or beyond, because he’s an example of someone who makes it clear that given enough focus over time and earnest interest in your questions and a persistent curiosity in the face of resistance, it’s possible to get to a point where your thought becomes more fluid and facile and able to navigate among different systems and concepts and form some appreciation for the whole. And that is something to behold and something I’ve always struggled with in my own pursuits. I think Mac would tell you it never becomes easy, but I also think listening to him, you may disagree and think that at least it becomes much easier. You can learn more about Mac in the work that we talk about in the show notes at braininspired.co/podcast/121. Thanks for listening. I hope you enjoy Mac, including his dad joke  </p>



<p>Paul    00:04:19    6:00 AM, 6:00 AM. What are you, what are you doing up at 6:00 AM?  </p>



<p>Mac    00:04:25    Well, you know, uh, when you live in such a beautiful part of the world, waking up early, looking out over the, you know, beautiful green, uh, you know, outlooks over the house, how could you not take advantage?  </p>



<p>Paul    00:04:36    What time do you actually wake up normally?  </p>



<p>Mac    00:04:38    Well, uh, that really depends, uh, on my kids, Paul, um, I have fairly rambunctious boys that like to jump up at the crack of dawn and get into all manner of hijinks. Um, my wife and I are often up quite early. And,  </p>



<p>Paul    00:04:51    Uh,  </p>



<p>Mac    00:04:54    Yes, yes. Uh, and then, you know, take, take advantage of the day. If the, if they don’t wake up, then I get to have a nice coffee in silence and do a bit of reading.  </p>



<p>Paul    00:05:04    I’ll see mine sleep in a little bit now until around seven. And, uh, I get up around four 30 or five just because that is the only real uninterrupted time that I get. Right. So, uh, so I was just curious if you were in that same boat,  </p>



<p>Mac    00:05:19    Uh, yeah, on a good day. I get to get up and have, have, you know, read a local papers or something like that, but it seems like more and more, uh, I get to embrace my role as a father of young, uh, young, fun boys at that time.  </p>



<p>Paul    00:05:33    How old are they? We don’t have to talk about this for everybody.  </p>



<p>Mac    00:05:35    How old are they? No, no, I’d love to, uh, so Tyler’s 10 and Calen seven. Um, they’re, they’re riding that fun age of, you know, sport and video games. So  </p>



<p>Paul    00:05:45    Yeah, I was my son’s soccer coach this last season, which was, um, challenging and also fun of course. Okay.  </p>



<p>Mac    00:05:53    Well,  </p>



<p>Paul    00:05:54    Oh, you did well, like a football soccer. I don’t, what do you guys call it down there?  </p>



<p>Mac    00:05:58    Well, we call it soccer, but yeah, the Europeans would call it football and we had a kind of merry band of pranksters and his team was a lot of different kids from a lot of walks of life and different skill levels, but we had a lot of fun.  </p>



<p>Paul    00:06:12    Yeah. All right. Well, good morning, Mack. And thanks for joining me here on the podcast. Um, it’s been awhile actually. I’ve been meaning to have you on for a while. And, uh, you’re on the, I keep a list, uh, from my patron supporters of, uh, suggested guests and you’ve been on that list for a while. So I’m glad to have you on here.  </p>



<p>Mac    00:06:30    That’s awesome.  </p>



<p>Paul    00:06:32    The first thing I want to ask you, do I understand this correctly because, well, we’ll get into what you do here, but do you have a medical background? Did you go to medical school?  </p>



<p>Mac    00:06:42    Yeah, that’s right. So, um, I did an undergraduate in kind of a combination of like biochemistry and psychology and really liked a lot of parts of it, but other parts really didn’t resonate. Um, and then went to medical school at the University of Sydney. Um, loved parts of that. Um, really the thing that just absolutely resonated with me was this challenge of trying to kind of take in all this information from all these different systems, the cardiovascular system, the respiratory system, the liver, the kidney, and understand this sort of cellular level physiological detail, not just the anatomy, but how it actually worked, but then how it broke down as well in different disorders. And so you were basically this rapid fire cycling through these different, uh, um, sort of systems and their pathology, but then there’s this really amazing point where for me, it was somewhere in about the second year of medical school where you sort of, it’s like when you’re wandering around a foreign city that you’ve been through maybe one or two times before, and you sort of like, think you’re completely lost and you turn the corner. You’re like, oh wait, that’s where my hotel is. And you like, figure out how the bits all kind of intersect. And that I think I became addicted to that, that idea of deep diving into problems and then finding a way out of the morass of uncertainty into that solution space. And so I, you know, that’s really stayed with me, I think, uh, ever since I was in medical school.  </p>



<p>Paul    00:08:11    Well, yeah. I want to ask you more about this. I mean, I was, what I was wondering is if, if that’s, if ma if your medical background is why you seem to at least love the anatomy so much, because it always gives me the spins, see you, you saw it as a, uh, a challenge to integrate these systems. And I just ran in fear essentially.  </p>



<p>Mac    00:08:30    Yeah. Maybe. Um, I think I had it beaten into me in the early days or something. Um, no, look, I think, you know, at the end of the day, you know, anatomy is kind of this, uh, this ultimate arbiter of you can have the most beautiful idea in the world, but it’s like that little meme that sort of data pops up and then shakes its head and you’re like, oh, well, you know, the, the answer doesn’t really fit. The, the anatomy kind of just is what it is it’s sitting there. You can see, and there’s so much beautiful work in the field now just incredibly detailed molecular analysis of the, of the brain that if you’re up in this sort of space. So trying to figure out how this bit interacts with that bit, if you can kind of try to lay some foundation under it with anatomy that kind of has those details baked in.  </p>



<p>Mac    00:09:15    I think that really stood the test of time in medicine and physiology. You know, if we understand the function of the heart, via understanding the particular types of cells, the sinoatrial node and the atrioventricular node and the way the muscles contract in a particular coordinated way, then we can kind of like lean our theories on that. We can understand how the Frank Starling law of contraction relates to that physiology and we can kind of build up from there. And so for me, I really do find myself a lot of time thinking of ideas and thinking of kind of implications of anatomy, but then kind of come back to work out, okay, how could that fit with the other things we know? And there are lots of times when it can be a really frustrating endeavor. Um, the anatomical literature is, uh, is amazing, but it’s not perfect in any way, shape or form.  </p>



<p>Mac    00:10:06    And one just to give one really good example. So much of what we know about anatomy comes from model organisms. And we don’t really know whether or not exactly the same anatomy exists in humans. And a lot of the times it’s really different in really funky ways that we don’t quite understand the implications for. And so you end up having to be very cautious about reading a particular literature in the macaque, if it were the same in humans, what implications might that have for some psychological function I’m interested in, but you can’t really take that to the bank all the time. So you’ve really got to take a lot of these things with a grain of salt and just use them as a sort of two-way conversation.  </p>



<p>Paul    00:10:41    Yeah. I mean, one of the things that is really impressive about your work, um, is it has a holistic feel, right? And I mean, this has, because of the struggle you were just talking about. Um, but you come at it with, from so many different angles and seem to integrate so many different things. You’ve talked about this a little bit already, but, uh, you know, people have different opinions on the right approach to take, to study, uh, intelligence in general, right? So there’s a, a loud, powerful cry these days to take a top-down approach and think about the behaviors, understand the behaviors, think about the computational level of Marr and then use that to then just simply look for it in the brain and, uh, and confirm it right on the other hand, uh, there are people like Steve Grossberg, who, whom I’ve had on the show who, um, doesn’t think about, well, traditionally didn’t think about the brain at all. He sat with these psychological data and then built neural networks thinking about how those data could be explained right. And implemented. So how would you describe your approach? Do you always start with the anatomy, do you, or is it just a, a messy, uh, cycle that, well, just let you, I’ll just let you answer how about,  </p>



<p>Mac    00:11:56    Yeah. I’m afraid this is one of those situations where, um, you know, getting to meet the chef, you’re like really that’s what goes into this. Um, so yeah. Um, I think, you know, maybe another kind of missing pinnacle in, in that, uh, that description as well would be the kind of brain from inside out approach, uh, of György Buzsáki, where you would say something like once we understand the degrees of freedom inherent within the nervous system, then we could try to launch from there to try to work out how they mesh onto the kinds of things that we can clearly do the kinds of, sort of functional capacities we have. And, and, you know, I, I’m a little bit of a kind of perspectivalist. Um, and I think that different questions lend themselves to a starting point from the different ends, but, but, you know, because I don’t really have skin in the game and I’m not having to advocate one of the positions that I think is maybe underrepresented.  </p>



<p>Mac    00:12:53    I probably, you know, will just sit on the fence and say, we need to kind of make them both talk together, all three of them talk together. Um, and so when I think about Marr’s framework, we often think about right, we think about the computational at the top, the kind of problem that’s trying to be solved. And we think about the implementation that, you know, the way it’s actually kind of baked into the, the animal. So we think about flying, uh, with a bird. And then we think about the feathers and the wings and the muscles in terms of the implementation. Um, the algorithmic level is often one that people think of as this kind of special level of doing something different. But for me, how I think about it is the algorithmic level is where the computation meets the implementation. It’s, it’s how the feathers and the, and the wings interacted to give rise to flight.  </p>



<p>Mac    00:13:37    There’ll be in terms of a plane, how the shape of the wings and the velocity of the plane allows the system to take off and fly. Um, but to me that that’s a different kind than the computation and the implementation. It’s, it’s almost like a kind of transfer function between the two of them. And so I think if, when we say we’re interested in the algorithmic level, I think we’re kind of almost committing to this notion that we agree that the implementation and the computation of both matter. And it’s about how, if we care about that particular problem, let’s say how a brain solves a working memory challenge or how we remember a particular phenomenon, or maybe even how a convolutional neural net is able to classify between a dog and a cat, whatever it is that we care about. If we care about the algorithmic level, we’re saying something about the architecture and something about the problem mattered, and it’s how those bits came together in this particular step that I, that I want to understand.  </p>



<p>Paul    00:14:28    So, uh, do you think in terms of constraints, um, and so I know you, you are a fan of the dynamical systems theory approach, and you use it and a lot of your work, um, but do you see both the implementation level and the computational level as providing the, I mean, what, you know, is constraint and important constraint to, uh, from, from both sides to then mediate the algorithmic level?  </p>



<p>Mac    00:14:54    Yeah, it’s a, it’s a really great question, Paul. And, um, this is where I think the, the chef-in-the-kitchen analogy comes to fruition because at the end of the day, I think with my own thinking, when I reflect back on how I came to a particular, uh, sort of hypothesis for some interaction in another system, it’s usually the ignorance that I have that allows me to get to that position. And, and, you know, my, my, my poor understanding, Paul, of the specifics of anatomy or my, my, uh, less than subtle appreciation for the complexities of some cognitive capacity that we have allows me to kind of mush them together in a way that says, well, what if they are interacting in this way? And if they are, what are the implications? And I think for me, that’s really the thing I love the most about science is the kind of hypothetical nature of it.  </p>



<p>Mac    00:15:46    Right. I, my job as a scientist, as I see it is to say, um, I don’t know if this is right, but if it is, what are the implications, and then I go off and try and test some of those. And sometimes the ideas are good and other times the ideas are really bad. And I think that the whole point of a scientist isn’t to sort of, sort of say, hold up a tablet, etched in stone, here are the correct things. Here are the facts of the world. But rather to say, someone who is trained in the scientific process knows how to go out and say, that’s curious, how the heck does that work? How can I put those things together? And then to bring it back to the kind of anatomy thing. This is like a fundamental mystery to me, somehow in some way, these tiny little specialized cells that just really, really love to bother each other all the time with action potentials or to squirt neurochemicals onto one another, that change the, you know, the way that the different systems can fire action potentials at each other.  </p>



<p>Mac    00:16:38    Somehow that coordinated activity gives rise to this conversation. And what a fun challenge, how do we work that out? And what is the language we need? What are the constraints that we need in order to take this bag of tissue? I mean, this is again where medicine kind of helps, right? Because early on in my, in my learning, you’re, you’re, you’re there in the wet lab, looking at cadavers and, and prosections and realizing that the system is physical, like deeply physical at the end of the day. And it can, it conforms to all of the same rules and laws that have given rise to really great explanations of the heart and the liver and the kidney, right? It’s the same sodium potassium ATPase in all of those cells shoveling ions across membranes, it’s the same endoplasmic reticulum. It’s storing calcium that then gets released when you need it to force some change in the action of the system, right?  </p>



<p>Mac    00:17:30    That’s the, that’s the mechanism of your heart increasing. When you go for a run, it’s the same thing that your brain uses when you’re trying to increase the firing rate of neurons as a function of something like noradrenaline or dopamine or acetylcholine, some of these, these gain mediated mechanisms. So to me, there’s a, there’s a, I have a deep reverence for the fact that we have this huge challenge on our hands. And as a scientist, I want to be able to come in and say, well, I don’t know how it works, but what if it worked like this and the anatomy kind of doesn’t fit with that story over there, but it does suggest this one and just sort of help put constraints into that framework. So we can start asking empirical questions and, you know, this is, you know, this is where I sort of see neurosciences at a really early stage and a really exciting one because literally these data we have and look at this really hard problem we have, and we can kind of attack it together.  </p>



<p>Paul    00:18:19    You want to talk some science?  </p>



<p>Mac    00:18:22    Of course.  </p>



<p>Paul    00:18:24    Uh, another reason why I was interested reading your work is because, so we’re, we’re about to talk about the role of the thalamus and, uh, sub-cortical structures like the basal ganglia and the cerebellum. So these days the cortex is the most important thing in the brain, right? And it has been for awhile. And at least the, uh, if you ask, uh, I don’t know, eight out of 10 neuroscientists, something like that. But, um, when I was going into graduate school, I was offered a project from my advisor Marc Sommer. So we studied a cortical area called the frontal eye field, uh, which has loops, uh, that travel through the thalamus and basal ganglia and loops that traveled through the thalamus and the cerebellum. And he wanted me to, well, he offered me a project, uh, for the cerebellum and the frontal eye field thalamus cerebellum loops.  </p>



<p>Paul    00:19:12    Um, I ended up doing my own silly project on metacognition, but then reading your work, I kept thinking what if I had done that and that, and how involved I would be, uh, with and familiar with, with this sort of stuff that you’ve done. So, I don’t know, I just, uh, a little little trip down memory lane, partially reading, reading your work. So, so like I said, it’s all cortex, right. But, um, one of the things that you have done is, uh, brought in the thalamus and, um, and loops in the thalamus with sub-cortical structures. So I want, uh, I would love for you to just kind of summarize why you think the thalamus is important. And then, uh, just the overall broad picture of the cerebellum and the basal ganglia, and you can get into whatever nitty-gritty detail that you would like. And of course, I assume that you’re gonna talk about dynamical systems theory as well, and I’ll interrupt you.  </p>



<p>Mac    00:20:02    Yeah. And do it all in the auditory format to where I can’t show any pictures.  </p>



<p>Paul    00:20:06    And by the way behind you, is that are those, uh, Ramón y Cajal drawings on the wall?  </p>



<p>Mac    00:20:11    Well, it’s a, it’s a Greg Dunn picture, but it’s, I think it’s inspired by the Golgi stains from Cajal of the layer 5 pyramidal neurons, which, uh, I, I have to say Matthew Larkum in Berlin has convinced me, these are the powerhouse of the cortex. They’re just beautiful, beautiful cells. Maybe we’ll talk about them in a little bit. Yeah. Um, so yeah. Um, you know, I think a historical perspective helps a little bit to orient this and then maybe we can get into some of the details. So yeah, the cortico-centric perspective, um, has really, I think, defined our epoch of, of neuroscience and, and there’s really good reason for it. If you were trying to understand the human brain and you look at it, right, you just put it on a dish in front of you. There’s a whole bunch of cortex there. And if you compare it to a chimpanzee or a macaque there is a bunch more of the cortex right in front of you.  </p>



<p>Mac    00:21:02    It’s a really great place to start looking just from first principles. And also when you see people that come into the clinic with, let’s say a particular stroke, this idea of localization of function in the cortex is really, really profound, really pervasive. And it’s been there from, you know, for a really long time. In fact, it goes all the way back to the Coliseum. Galen was the physician for the, uh, for the Coliseum for a few years. And he actually sort of came up with this hypothesis that for him nervous system function was actually about pneumatics. It was about pushing fluid through holes and tubes and things. But for him, he was like, okay, if the gladiator comes in, you know, and he’s had the kind of broad sword hit him across the brow. Now, all of a sudden he can’t, you know, um, you know, uh, make any mental plans kind of like, uh, Gage, maybe couldn’t inhibit his own behavior.  </p>



<p>Mac    00:21:52    And then you’ve got someone over here who received a mace to the occipital cortex where he can’t see anything on the right. And so he came up with this notion of the pneuma flowing from these very particular locations. And that has really kind of spread over time to this. I think this sort of, I would call this kind of localization kind of hypothesis is, is really the dominant story in the field. And, and really like my, the world that I came from, uh, so empirically it was using functional MRI to run analysis of brain imaging data. And Don’t apologize. We can w we can talk about fMRI, uh, in a little bit, if you want. I mean, in a lot of ways I think fMRI is, um, is kind of like a, like a pimply teenager. That’s, like, been through it all, right? They’ve been insulted in every way they possibly could. So like, bring it on, man. I can take it right. They’re kind of coming into their own,  </p>



<p>Paul    00:22:44    Uh, favorite, uh, neurophysiologist, um, uh, uh, poking, poking someone who does fMRI. I have a lot of respect for that, for fMRI. So,  </p>



<p>Mac    00:22:52    No, sorry. No, no, no, no, no, no, not at all. But so the, the prevailing, uh, you know, way of analyzing fMRI data, which really kind of came from PET imaging, is to run an experiment, right. Um, run a block of looking at a face and a house and then use some statistical contrast to find the most extreme statistical values. Yeah. And that, that approach really lends itself to this localization idea. Right? If I get you to look at a picture of someone you love and someone you don’t like very much, and I contrast them, you go, oh look, love is sitting in the nucleus accumbens. That’s a really sort of attractive conclusion for me to make. Um, but if you think about it, uh, you know, just from a, again, from a slightly different perspective, if I was to sort of take that part of your brain and, you know, somehow expertly remove it and put it onto a dish, it’s not loving anything.  </p>



<p>Mac    00:23:38    It’s just a bit of neurons, and those neurons have to interact with the whole to be able to function. And so we need to start thinking, I would argue at a much broader level about coordination amongst nervous system, uh, areas. We still need to keep a skerrick of that localization idea. Cause it’s clearly got some truth to it. There is a specificity to our nervous system. This part of the brain is different to that part of the brain. But if we take that to its extreme, I think we, we come up with some philosophical answers that are a bit impoverished. If we start to think about how that individual area with its constraints works with the whole system, admittedly, a very hard problem. We can start to ask about how the system can actually use the information that might be being processed in that area for adaptive function.  </p>



<p>Mac    00:24:21    So once you start moving in that direction, you can start thinking about different parts of the cortex interacting with one another. You could say, how does the frontal cortex interact with the parietal cortex during working memory or something? Or you could ask, how do I, you know, if we wanted to talk about something like predictive processing, how is some higher level area providing some kind of a prior, some kind of evidence or information for that system so that when it processes evidence, it can push it in one direction and interpret them one way or another. Um, those are one style of questions you can ask them. The field is really, you know, uh, is a really developed field in that space. But for me coming from that background again in medicine where you can’t think of the heart outside of the context of the whole cardiovascular system and the lungs, I’m forced to kind of come back to that perspective of saying, what are we leaving out of the picture?  </p>



<p>Mac    00:25:13    What if we look at just the cortex, what else is there? And if you start to zoom out just a little bit like that, you notice that number one, the cortex does nothing on its own, right? It interacts, it interacts with so much of the rest of the nervous system. Um, and, two, the consequences of impairments in these other areas, the thalamus is a really great example, but it’s by no means the only one, um, is really profound. And so if you have a stroke in your thalamus or a tumor in your thalamus, you often lose consciousness, you go into a coma or you can have really, really profound deficits in wide ranges of domains. They’re not as specific as you might see in the cortex, but they’re really, really profound. And so we’ve known for a really long time that the thalamus is incredibly important for all these really large functions. Arousal’s one of them, but it’s involved in working memory. It’s involved in attention, it’s involved in a ton of different psychological,  </p>



<p>Paul    00:26:07    But the traditional story, right, is that it is important because it’s a pass through maybe a bottleneck for feeding the important stuff. Right. That’s the traditional story. And that’s the story that’s changing.  </p>



<p>Mac    00:26:21    Yeah. But nobody puts Elvis in the corner. Right? Paul?  </p>



<p>Paul    00:26:25    No. Well, not anymore. Thanks to people like you.  </p>



<p>Mac    00:26:29    Well, so I think, you know, I, uh, I’m absolutely just the, um, the broadcaster of other people’s brilliant work. Um, so the thalamus, yes, for a really long time, got put in this, in this basket, as you know, the relay was kind of, um, the story that people would tell, and the reason for that is that if you look at the connections of the thalamus, let’s say in the context of vision, you’ll see the inputs coming from the retina pass through an area called the lateral geniculate nucleus, which then goes on to the visual cortex as well as the superior colliculus and you’ll say, okay, well we know V1’s really important. And if I knock out V1, someone can’t see anything. And so what I’m just going to say is, well, the thalamus’s job is to kind of make sure that the cortex got the information that it needed to process that vision.  </p>



<p>Mac    00:27:14    And this again is sort of embedded within this. Um, uh, I would argue kind of a misguided view of, of evolution, which is that, um, what’s happened over time, is that a brainstem of the reptilian brain has kind of like had an ice cream put on top of it, the, the, the subcortex, and then this got like a sort of rain coat put over the top of it. And the cortex, this is called MacLean’s triune brain theory that essentially adding whole new bits on top of one another. Um, whereas a much better description of, of evolution. If you talk to someone like Paul Cisek or Luis Puelles, is that you’ve got the basic bauplan the whole time. There’s, uh, uh, a midbrain, a brainstem that connects to a spinal cord, a cerebellum, a thalamus, a tectum and some form and a hypothalamus.  </p>



<p>Mac    00:27:57    And then some form of what we call a telencephalon, which is cortex, basal ganglia, a bunch of other structures, like the amygdala that have sort of expanded out like someone blowing up a balloon and the balloon’s degree of freedom is up in the telencephalon, it’s expanded, but you’re not adding things onto one another. Um, and so the reason that that’s important is that if you think of the cortex as being added on, then it’s really, really easy to think. Well, in humans, the reason we can see the reason we’re conscious is that the cortex just popped in. And before that these poor suckers that didn’t have a cortex, they couldn’t do anything. They were just little automatons running around the world. Um, if you take the, um, so that was the kind of prevailing way of thinking about it. If you start from there, you can get some really, really interesting taxonomy.  </p>



<p>Mac    00:28:37    For example, Murray Sherman, uh, and Ray Guillery have a really nice way to think about the thalamus, which is that a lot of the connections are like that one in the LGN I talked about, what they’d call first order, which is that they receive an input and pass onto the cortex, but a whole bunch of the thalamus is actually what they call higher order, which means that it receives cortical inputs and sends it right back to the cortex. So it’s a lot like a kind of complicated, hidden layer in the kind of deep, deep learning kind of a way of thinking. And that it’s doing some kind of weird augmentation or manipulation of the data, but it’s not directly related to what’s inputting or what’s outputting from the network. Um, so, so that’s one way of thinking about it, but the way that I’m really attracted to comes from a different scientist, Ted Jones is a neuroanatomist from, from New Zealand, um, unfortunately deceased.  </p>



<p>Mac    00:29:20    And his way of thinking about it was a bit more thalamus centric. What he said was if we look at the thalamus and we try to understand the projections of the different thalamic nuclei, what kinds of patterns do we see? And he identified the type that we talked about for that relay type. He called that a core type where it receives an input, and it projects really precisely to the cortex, usually in the middle layers of the cortex. But interdigitated with that, it’s almost like a blend of these, a little bit yin and yang, where every single nucleus in the thalamus has a little bit of each of these different populations, some nuclei more than others. This other population, he called the matrix population. Cause it looked like the core nuclei were embedded in this matrix of these other cells. In contrast to the core cells, they project out really diffusely.  </p>



<p>Mac    00:30:01    And they do that either to the super granular layers of the cortex where a lot of the feedback projections from higher areas come in, um, but also to a lot of important sub-cortical structures like the striatum and the amygdala. So if we, so if we, if we start with this sort of thalamo-centric perspective, all of it’s just a relay, then if we start incorporating these other cell types, which are really numerous, um, it’s really hard to take that same analogy of relay and kind of like feed it onto these other cells, right? If they’re relaying, they’re doing it in kind of a weird way, right? They’re sending a message out in a really broad fashion, you know, things could get lost. You could have that, you know, purple monkey dishwasher game that we used to play or play when we were kids where you kind of, you can’t really envisage that, um, that message passing metaphor, really holding up.  </p>



<p>Mac    00:30:48    Um, and actually, uh, in some sort of side work that our really talented postdoc Eli Mueller in my lab, uh, has done. Um, there’s actually a really interesting analogy with this, this type of diffusely projecting system, which is that they work a little bit more like temperature, uh, in a glass of water than they do like a message being passed and to cut a very long story short. Cause I don’t know if we want it to go there. Yeah. We can come back to it later. Um, the idea is that by having a kind of diffuse signal that passes up to many different areas, what you’re essentially doing is, is allowing those, those, um, contacted areas to become more likely to be part of some active coalition in the system. You’re sort of imbuing the system with a flexibility and a variability that it wouldn’t otherwise have so much the way that heating up a glass of water lets the little water molecules whiz around in ways that they couldn’t, if they were stuck, let’s say, in a bunch of liquid or a block of ice,  </p>



<p>Paul    00:31:37    Uh, is the idea that, um, sending the diffuse projections is raising the excitability of the neurons or is it the actual firing of the neurons that puts them into a regime where they’re supposed to be more like ensembles, et cetera?  </p>



<p>Mac    00:31:53    Yeah, I think it’s probably one of those little column, a little column B kind of things because the matrix elements, uh, not the only structure in the nervous system that have this sort of type of diffuse projection. And one of the other main culprits is an area that is probably kind of my favorite in neuroscience at the moment, which is the ascending arousal system, which instead of projecting up in that diffuse way and releasing glutamate like the matrix elements would, which we typically think of as that kind of message passing type, um, neuro-transmitter they released a whole other class of neurotransmitters that fit into this category called neuromodulators, which, um, they’re a little bit more like a whole modal style of passing where they, they, um, hit their little receptors, but the main impact they have is to change the kind of internal milieu of the cell.  </p>



<p>Mac    00:32:36    They release some calcium or they open or close voltage gated channels, and that can change the excitability of the system or the receptive, the receptivity of the system is another way of putting it, um, that can kind of alter information processing modes, can change the kind of, um, w we call this the kind of, you know, uh, sort of the state of the brain, uh, but kind of in inverted commas. Um, so yeah, I don’t know if we want to get to that. This is the problem with, um, the neuroanatomy, right. It can go into many different directions.  </p>



<p>Paul    00:33:03    That’s the thing, right. So yeah, let’s come back to the neuromodulators because, uh, you’re doing theoretical work, uh, with that as well. And you know, you’re just interested in everything. So I, I would like, I know, but you know, you’ve got to coach soccer and stuff. How do you have the time, but no, so sorry I interrupted us there.  </p>



<p>Mac    00:33:24    No, no, I’m just let me see if I can pick the thread back up. So, um, so if we were traditionally thinking about the cortex is special and everything else is kind of boring and we change our mindset and we say, okay, that everything’s been there the whole time. How has it elaborated? Now we need to start asking, well, how is the thalamus organized? And if we take this cortex first perspective, I think we can kind of lead ourselves into thinking the thalamus is just passing us a message. It’s sort of, you know, the Butler waiting at the front door don’t bother the cortex unless anything really important happens, right? That’s maybe one way we could start to think about it, but if we take Ted Jones’s perspective, we can think now the thalamus has this different set of capacities, a different set of ways of interacting with the nervous system.  </p>



<p>Mac    00:34:07    Um, that might be really beneficial for explaining the kinds of modes that it can process in. And so if you’re interested in conveying that you heard a particular sound or not, maybe that core system is still really good, but if you want to make it so that you work out what that sound was, how could I disambiguate that sound from a whole bunch of different ideas about what that sound could be? I would argue that something like the matrix, system’s going to be much more helpful because what it’s going to do is it’s going to bring online way more systems that are going to help you to disambiguate and make the system function in a different kind of a mode. So that’s really the starting point. Um, but you know, once we’ve taken that first step away from this cortico centric perspective, it’s like this sort of endless walk, there’s not, you can’t help, but take that next step.  </p>



<p>Mac    00:34:51    And if you look at the thalamus, um, one of the really, really interesting things is that even though it has these different blends of cells, it actually has different sub nuclei within it as well. It’s a really fascinating structure near anatomically. And we’ve learned a lot in the recent years from these really brilliant studies from, you know, groups like, uh, the Allen brain, uh, group and Adam Hammons group has this beautiful paper on the thalamus, um, looking at all of its complexity of the different projection types. Um, but one of the things that, um, you know, I’ve, I’ve become really interested in is thinking about, well, what other structures actually interact with the thalamus? Let’s call it from below. So if we, if the cortex is its connection to the top, how it’s interacting first order, higher order, or call matrix type projections, what kind of projections impact the thalamus?  </p>



<p>Mac    00:35:38    And this is, uh, you know, again, one of these kind of interesting stories that shows you in a way, what the chefs doing in the kitchen, um, and it makes the chef look, uh, completely incompetent. But, um, I, when, when I hired my first postdoc, again, Eli that I mentioned before, um, because we came from such different worlds, he he’s from the world of physics and, you know, I have this background in medicine and neuroscience. We really wanted to kind of find a way to talk to one another. I, you know, I really wanted to hire someone that could help me do computational modeling at a high level. And Eli’s really got great experience in that space. Um, but in terms of his neuro anatomy, he’d really focused on the basal ganglia, um, which is a fascinating system, but I wanted to talk about a little bit more than that, this sort of bigger picture.  </p>



<p>Mac    00:36:20    And so we had these really great conversations. And I think when you, when you have really great conversations with another enthusiastic scientist, you can both find new patterns in places that you may not have expected them, uh, like that temperature analogy I mentioned, but you can also quickly realize your own ignorance. And I realized in trying to explain to Eli how I saw some of this fitting together, that I really didn’t know enough about what, how the, these different big structures in the subcortex like the basal ganglia and the cerebellum were impacting the thalamus. Um, and the reason I bring both of those up is that, um, the thalamus, as I mentioned, has all these different structures in it. And in the, what we call the ventral tier, the kind of most anterior part of the thalamus, is a couple of structures, the ventral anterior, ventral lateral nucleus, as well as the medial dorsal nucleus that receive basal ganglia and cerebellar inputs and are known to be part of these loops, that kind of project back up to the cortex.  </p>



<p>Mac    00:37:12    So, but we don’t really know a huge amount about exactly which regions projected, which, and there’s lots of different stories in the literature about this. And so this is something that, you know, I’d known really from back in my PhD days. I know that there was this uncertainty and, um, I’d had all these great conversations. Uh, Charlie Wilson at UTSA was an absolute, absolute savior during my, um, my post-doc, I was over in the U S working in California, but my wife’s from Texas. So we were always sort of spending time both in San Antonio and in Palo Alto and at UTSA, Charlie and I would have these fantastic long conversations about the basal ganglia and the thalamus. And that man is brilliant. Anyone who gets a chance to chat with him should buy him a beer. Um, anyway, we were talking about this and, and he put me on to some really great research from a group in Japan — Kuramoto is the first author.  </p>



<p>Mac    00:38:02    Um, and essentially what they’ve done is they’ve taken, um, uh, a rodent’s thalamus, and looked at this ventral tier and they tried to work out, which, uh, if we characterize these, um, different thalamic cells, according to some staining, which regions are projecting to the different types of stained thalamic cells and to cut a long story short, it turns out that they stained the cells with exactly the same stain that Ted Jones had used to find the core matrix populations. And lo and behold, they found a really, really clear rule, which is that the core cells in the ventral tier, the ones that project precisely to the cortex, they receive glutamatergic inputs from the subcortex most predominantly from the deep cerebellar nuclei, the main output of the cerebellum. And in contrast, the matrix cells, the ones that project diffusely to the cortex, receive predominantly GABAergic input from the globus pallidus, the main output of the basal ganglia.  </p>



<p>Mac    00:38:51    And it was like, it was like this sort of, you know, this moment where everything kind of went, oh, wow. So there’s this story about the thalamus and the cortex interacting that has this really clear kind of difference in terms of it has a population dynamics that could emerge from it that is receiving very different inputs from these different sub-cortical structures. So then all of a sudden it was like, when you’re doing a puzzle and you’re like, oh, that’s how it goes. You just like, turn it to the side. And it clicks in like, oh my God, that’s amazing. And I spent the better part of a year, you know, really reading and thinking about what the implications would be for that little twist, where we can all of a sudden have a discussion about thalamus cortex, basal ganglia cerebellum, and think a bit thinking about how those interact and what the implications would be for all these other mechanisms and stories we have that are really, you know, grounded, granted grounded, let’s say much more in the cortex or much more just in the cerebellum or much more just in the basal ganglia.  </p>



<p>Mac    00:39:48    And all of a sudden we had this way of kind of talking about them together. And, um, you know, I see this as, you know, the first very gentle step in, in what I, what I view as a massive landscape of possibilities of trying to understand the system in detail with the kinds of tools we have now, optogenetics and, uh, you know, really great, um, detailed characterization of these systems, as well as with computational modeling, I think is a really great opportunity to, to think about these things. But the paper we’re discussing is really kind of the first step in that direction, saying, how might this stuff work together? And what might the implications be?  </p>



<p>Paul    00:40:21    I love those moments in, uh, they’re very few and far between at least three day wear, but, uh, those moments where it, where it really clicked,  </p>



<p>Mac    00:40:31    You know, that there’s a little side story here that I think is sort of fun as well. Um, so I, as I mentioned before, I did a PhD, uh, in fMRI, and the focus of my PhD was on Parkinson’s disease. And so everything in the world of Parkinson’s disease is basal ganglia and dopamine, the whole game. And we were trying to stretch that out a little bit and think about the system a little bit more because there’s a bunch of symptoms in Parkinson’s that don’t really make all that much sense in the traditional kind of opened the gate, close the gate in the basal ganglia story. Um, so I was already kind of thinking about things from that a bit more zoomed out perspective, but I feel like I’d, I’d read probably too many papers about the basal ganglia at this point in my career.  </p>



<p>Mac    00:41:12    Um, but the cerebellum didn’t make as much sense to me. I didn’t really have as great of a feel for it. And, um, it was actually, uh, my father, uh, he’s, uh, an evolutionary biologist and, um, being an evolutionary biologist brains to him and kind of just like a boring side detail. That’s what interesting beauty of the ecology ecological universe. Um, and so having my father talk to me about neuroscience was quite, uh, uh, quite a trait. And one day, uh, when I was kind of towards the end of my PhD, he came to me and said, um, you know, your, your son Tyler at this time, Tyler was about year and three or four months. He’s like back about two or three months ago when Tyler was walking, I noticed that he was kind of stumbling around and it was really effortful. And he was really focusing on everything he was doing.  </p>



<p>Mac    00:41:56    And now when I watch him run around the pockets, like nothing, nothing bothers him at all. He just runs around the world and what’s happened in his brain. And I was like, okay, number one, my dad asked me a question about the brain, like what’s going on right here. Um, but, but number two, it, it really kind of threw me for a loop because here’s this abrupt change that’s happened in my son’s ability to do things effortlessly. And I think that’s a really, really crucial feature of our, of our nervous systems, particularly humans, right. Where my dad likes to say that, um, humans are, we like to think we’re the kind of pinnacle of evolution. Um, but if you lined us up against any animal in the animal kingdom, they would beat us at the thing they’re good at. We’re not the fastest. We can’t swim that well, we can’t see very well. Um, but we’re, we’re really flexible, right? We’re born extremely organized and we can do things really, really, uh, we can do lots of different things wherever we put our mind to, we can learn, but we learned to do them quite effortlessly, such that we don’t really think about them anymore. And I think this, this case with my son learning how to walk, uh, with this kind of effortless nature was a really, really good example of this.  </p>



<p>Paul    00:43:01    I thought you were going to say that you told your dad, oh, dad, that’s the cerebellar thalamocortical loop coming online. But of course, you didn’t, you didn’t just immediately say it. Yeah,  </p>



<p>Mac    00:43:12    No, no, no, no. I did not. No, no, it’s, this is, I, I think this is important though, right? Because I think we have this conception sometimes in science that we, that, you know, think the clouds for kind of shine of light pops down and you’re sort of somehow inspired by something I wasn’t inspired by the answer. I was inspired by how hot that problem was. Right. How could we do this? And when I went and read the literature, most of the literature that talks about what we would say is sort of habitual behaviors focused on the basal ganglia. Um, right. We would say something like, as you learn how to do a particular habit, it’s something like the anterior pots of the basal ganglia decide that something is interesting. And then over time it gets passed back to more posterior parts of the basal ganglia and they kind of execute the habit.  </p>



<p>Mac    00:43:56    But as I said, coming from this world of Parkinson’s disease where we’ve been thinking a lot about the basal ganglia, I was like, well, look, that makes sense in some sense, but we also know that the basal ganglia is really a dimensionality collapser right. The number of cells in the striatum is a couple of orders of magnitude, less than the cortex. And again, you collapse it to the pallidum before going back to the thalamus. And so if you’re going to store a really precise habit, it doesn’t seem like the best place to do it. Right. You you’ve, you’ve sort of, why would you waste your resources in this space, which is already limited, on one very precise thing when there are other parts of the brain to do it. And so I was immediately a little bit skeptical of this story just from a kind of first principles perspective.  </p>



<p>Mac    00:44:37    And then the more I looked, the more I read, I was like, oh my God, around the corner in this place that everyone likes to forget. And if you tell most neuroscientists about it, that aren’t studying it, they’ll yawn and pretend like they’ve got some other problem to solve, but come on, man, the cerebellum is just beautiful. I mean, it’s, it’s over half of the cells in an adult human brain. Most of them are these little tiny granule cells. And it does the exact opposite of the basal ganglia when it receives an input, usually the outputs of the, um, the layer five pyramidal neurons, those things sitting on the picture behind me, from the cortex, it does a dimensionality expansion. So it’s a little bit like the kernel trick in machine learning, right? Like a reservoir network, it’s spreading out the signal.  </p>



<p>Mac    00:45:16    And now you can condition on all the little subtle differences that you might never have guessed were important. And whatever’s most adaptive gets fed back to the cortex as a little guess as to what’s coming next. And that is the one that really hit me where it’s like, oh, of course, what Tyler’s learnt how to do is to anticipate what’s going to come next when he’s walking. So now he doesn’t have to think about it. He can just do it. And I think this translation from deliberate processing to more sort of what we would call delegated automatic processing is absolutely fundamental. And so a lot of the history of this, uh, the paper that we’ve been discussing is in trying to put some foundation beneath that insight, uh, and to try to work out what the neuroanatomical basis might be, um, of that kind of mechanism, just to kind of, to summarize the, I think if we think about just the kind of cortex we can come up with certain answers or mechanisms for how particular functions might arise.  </p>



<p>Mac    00:46:14    Um, and so, you know, a really good example would be the predictive processing we talked about before, right. Um, if you have an expectation that something will happen, then the evidence that comes in, um, can interact with that expectation and give rise to what we call the posterior. And then you can kind of act on that. And this is a kind of really classical framing to think about, um, a lot of, um, psychological functions. There’s a ton of evidence for this. Um, but what we don’t know, and Michael Spratling’s work is really, uh, great and I’d point people towards, uh, towards his work to this, to this end, what we don’t know is how exactly that kind of a computation is implemented in the brain. And we have, uh, different, uh, hypotheses about it, probably the most popular, uh, it comes from an old idea from Rao and Ballard.  </p>



<p>Mac    00:46:58    Um, and the idea there is that if you want it to be maximally efficient, the best possible thing you could do is to have a, uh, prior come down and evidence come in. And if they match don’t do anything at all, right. If you get a good match, that’s it, you’re done. The match is finished, but if you don’t get a match, then what you should do is you should send a signal to other areas to try to work out what the appropriate match would be. They call that a prediction error, because you made a prediction. The evidence came in and there was an error. So that’s the way we typically think about that in a brain is that, um, the cortex sort of sends a little projection down. That’s the prior that the evidence comes in, let’s say via the thalamus and its little relay, the Butler passes the message to the, to the person sitting in their drawing room.  </p>



<p>Mac    00:47:41    And then they kind of all look at matched and we move on. Um, the problem is when you look at the anatomy and you start to think about the rest of the system, you realize that while that might be one way that this system works, it’s missing some of the benefits you might convey by having the sub-cortical system play a role. For example, the thalamus can project up in that diffuse way and act as a, you know, what we would call a Pryor, but it doesn’t seem to have a lot of the other kind of constraints that the cortex has in its specificity. So if, if the thalamus projects up versus let’s say, you know, some area in the posterior parietal lobe, we might give those very different labels in terms of their functions, but they might have exactly the same kind of an impact on the system.  </p>



<p>Mac    00:48:23    So that makes you sort of question a little bit, um, another is that the cerebellum is projecting up much in the way that the kind of evidence that would come in from the lateral geniculate nucleus of the thalamus is coming in. And so it just makes you question how these different systems might function when we kind of take that more zoomed out perspective. Um, and so I, I think, you know, again, uh, this is really the first step, um, in a, in a direction that I think requires a lot more research. Um, but trying to embrace that systems level perspective when we’re thinking about the functions that only emerge from the whole system, right? That our ability to have a conversation is not just because we have well-developed cerebral cortices that can process language. It’s also in our ability to, uh, anticipate when the end of a sentence will happen or to know the right time to ask the right question to push us in a different direction. But it’s also in our ability to be awake and alert and to even process information in the first place. And I think as much as that might sound a little bit, like, you know, hand-waving, I think it’s important that we remember this and that we remember that the kinds of functions that we’re trying to describe are not due to parts of the system, but the system as a whole working together.  </p>



<p>Paul    00:49:31    I think that was well put. Yeah. So, so that’s, so you have a role, um, I mean we can kind of step through a couple of the, uh, implications that you talk about. Did we, did we get enough of the story in of the basal ganglia versus cerebellum and how they, uh, put the brain in different modes and how we, one way to communicate that and think about it is through dynamical systems theory, or should we kind of summarize that before we move on?  </p>



<p>Mac    00:49:59    Yeah, no, that’s yeah. Looking at, we can give that, give that a crack. Um, so yeah, over the course of probably the last, um, four or five years, uh, I’ve been really led down this, this path towards thinking about the brain as a, as a sort of constantly evolving dynamical system. Um, my collaboration with Michael Breakspear out of the University of Newcastle has been really, um, uh, really inspirational in that front. And, um, and then working with Eli and Brandon, my two post-docs, uh, also Johan John and cardiac tryna, and Cal Sawyer. We have this little fun group that kind of integrates across some of the, um, this space to try to just have discussions about how can we, how can we frame some of these ideas that we’re thinking about in neuroanatomy in a dynamical systems, language and vice versa. Um, and so one of the challenges that I kind of set for myself with the paper we’ve been discussing is to say, okay, we go to this neuroanatomy, but if you’re not a neuroanatomist, I might be describing core cells and matrix cells and calbindin positive and parvalbumin positive cells.  </p>



<p>Mac    00:50:58    And it might just be like, you know what, this isn’t for me. I don’t like to think about different cells. I want to think about what the implications are. And so I wanted this challenge of trying to think, all right, if we think about the brain as, uh, having these kind of, um, low dimensional structure to it, and we think about how that structure is evolving over time. I really great analogy for, for that process. And in fact, it’s a little bit more than analogy because the kinds of equations that would describe that analogy are really directly related to how these, the nervous system is interacting with itself. Um, so a really great analogy is this idea of the attractive landscape. And so the concept would be something like this. Um, if you conceptualize all of the billions of neurons in your nervous system as a little boy, a little bowl, um, the way that that nervous system will interact over time, or the way that it’ll change over time is dependent on what kind of opportunities are present, present to it.  </p>



<p>Mac    00:51:50    Um, so you can imagine, you know, um, you’re sitting in front of the coffee machine in the morning and there’s a little button on the coffee machine in front of you, and you have to push it to make the thing up and say, you can put the coffee party in, or if you’re really fancy, you have to like put the coffee in a tamper down and do all your history stuff. I have no time for that. That’s the secret to my time to read is that I use an espresso. Um, so, um, you have this opportunity to present to you. And the way you might think about that is that there’s an attractor or a basin that’s pulling the system towards it. Um, and so this is a traditional way of thinking about kind of dynamical systems frame. It doesn’t have to be a brain, it could be any kind of dynamical system you’re interested in.  </p>



<p>Mac    00:52:26    Um, but for, for me, I was trying to think about in this way. And so I started to think, well, if you’ve got an attractive landscape and there’s a really, really deep, well, that’s a little bit like a little coalition of neurons firing a lot, they’re kind of basically saying this is where we’re going to move towards. And if you’ve got a lot of neurons firing at the same time, that’s a little bit like saying, oh, there’s a thousand different options in front of me. I don’t know which one I should go towards. And that’s a little bit like the landscaping Flato and we have some computational modeling that kind of fleshes out, pushes this analogy out in a much more satisfying way if you’re interested. Um, but then I started to think, well, if the basal ganglia is projecting to these matrix, uh, population, right, they’re really diffusely projecting one.  </p>



<p>Mac    00:53:05    And the cerebellum is projecting to the core, really precisely projecting one. Um, then maybe they’ll have different, uh, impacts on how the brain state, at least in the sort of ventral tier of the thalamus to the sort of frontal cortex might evolve over time. And so if I’m coming across a particular, um, challenge, like, you know, uh, how do I, you know, shoot a basketball? A lot of my intuition was this: we just had a basketball hoop installed in my backyard. I was trying to remember how to shoot free throws. And, uh, it didn’t start out particularly elegantly, but I’m thinking like, what am I doing here? Um, but what I noticed with that is that early on, I tried lots of different things. Maybe I’ll tuck my elbow in, maybe I’ll kind of flip my wrist more, maybe I’ll turn my shoulders to the side.  </p>



<p>Mac    00:53:43    I had all this variability in my system theater important. You gotta get the feet pretty straight, sorry, the feet that’s right. Yeah. You got to find it. But then in the NBA, like everyone’s always jumping around with their feet and stuff and Steph Curry sort of flicks the ball. Like he doesn’t care about it. It’s crazy. Right. Um, anyway, I was trying to think about this and I was thinking, well, if the matrix thalamus is receiving the basal ganglia input, and I know it’s involved early in learning, uh, and it’s gating via these sort of two inhibitory populations: the striatum inhibits the globus pallidus, which then inhibits the thalamus. So what you can do is you can turn off that inhibition. What that’s gonna do is it’s gonna release these matrix populations, uh, just a little bit. And what that’s going to do is it’s going to come up and it’s going to kind of diffusely sprinkle activity across the nervous system.  </p>



<p>Mac    00:54:27    So it’s not just going to release one little plan, which is kind of the traditional way I’ve been taught about the sort of these segregated basal, um, ganglia thalamocortical loops. But rather what it’s going to do is it’s going to basically flatten the landscape, but just in that one little particular location. So if I’m shooting a free throw and I miss 10 times in a row, maybe it’ll just let me tuck my elbow in. Or as Paul said, move my feet to the side. In contrast, as the cerebellum learns, every action that I do every time I make a movement, there’s a little reference copy. That’s coming down to the pons, shoots into the cerebellum, does its expansion, comes up to the cerebral cortex via the dentate nucleus, back to the thalamus, to the cortex, right? That loop is going to be saying every time you make that move over time, I want you to learn that particular one that led to you getting the shots in and ignore the ones that you didn’t.  </p>



<p>Mac    00:55:14    Now, the caveat here is that the cerebellum looks like it learns in a slightly different way than the basal ganglia. It’s not learning via reward prediction errors, um, it’s, it’s a supervised learning where th th there’s a, a template and a, and an input, and you try to match them together. But with that detail aside, the cerebellum learns to sort of take over that function and do it in a really precise way. And so in the language of dynamical systems, we could think, well, if there’s an attractor and the basal ganglia gets involved, that’s going to sort of flatten it out a little bit. So that if, if option A was the one you were doing, now, you could try B or you could try C. And if you’re using the cerebellum, if A shows up and that’s the start of your sequence, let’s just get to B real quick and then C and D.  </p>



<p>Mac    00:55:52    And then it’s going to let you quickly move through the sequence and a really automatized way. You don’t have to think about it anymore. So if I get all of that movement down now of a sudden, I can have a conversation with someone while I’m doing shooting free throws, or I can be thinking of a project in some other part of my brain while I’m actually executing really autonomously. And that’s where this idea, I think, um, you know, links back to some really fascinating psychological phenomenon that we don’t have great explanations for in the brain, like how you can be driving home from work and completely forget that you drove home. You don’t even notice it the whole time. Right? You did that job expertly in that context. Um, but you, some, you somehow weren’t aware of it. And so there’s all these really, really fascinating, um, sort of mechanisms out there in the brain like the cerebella loop, but I think are really important for some of those functions, um, that I think, you know, maybe this moves us a little bit closer to me, how to make hypotheses about how we do things as are the automatically, um, like the driving while talking.  </p>



<p>Paul    00:56:47    Yeah. I mean, so you talk in the paper and speculate about system one and system two and give roles for the cerebellar loop and the basal ganglia loop there as well. Then maybe, maybe you could just brief summarize that and then, uh, I want to bring it home and ask you a few questions about the thalamus in particular.  </p>



<p>Mac    00:57:04    Oh yeah, sure. Um, so the system one, system two kind of label is sort of part of a broader group of what we would call dual process theories. And these are really, you know, have been around for a really long time. Um, Shiffrin and Schneider did a lot of the really great early cognitive neuroscience work. And they’ve got these two papers that were like both a thousand pages long and full of experiments. Um, but, uh, Gordon Logan also did some really cool stuff with the sort of inner loop out loop, uh, ideas. So this is a sort of an old idea, but Kahneman in his sort of brilliant way kind of really put down this really kind of lovely kind of label for it. Um, and the idea is that, um, there are kind of different modes in which we can interact with cognitive problems.  </p>



<p>Mac    00:57:42    Um, a lot of the time, in fact, the vast majority of the time, we’re in what he calls system one, which is where we have preconceived notions for a particular, let’s say context, and let me just act really quickly. We use what he would call a kind of heuristic. We say, if the context matches something that I think I’ve kind of already experienced before, I kind of know about, I’m just going to act in this particular way. Um, and a lot of the time, I think we take this for granted, it’s actually really useful in a lot of senses. Think about how many system one-type functions you have for language, for example, in your language that you understand, you can parse almost any kind of word, any kind of phrase. You can quickly work out if someone’s used a verb around the wrong way with a noun.  </p>



<p>Mac    00:58:22    Whereas if you walk into a foreign country where you’ve never heard the language before, it absolutely sounds like, uh, you know, it’s almost musical or something. Like you just can’t parse anything. Even the, even the cadence is really foreign. Um, so I think system one is everywhere and it’s sort of like our default way of interacting with problems. And if that’s not working, if system one can’t handle things well, um, or we do it in a deliberate fashion and agency is a whole other question that you probably need to talk to way smarter people about. Um, but if we do things in a deliberate way, that’s like kicking the system up into what we call system two, which is really deliberate, much slower and much more kind of conscious processing where you’ve got a focus and you’re really kind of doing work on that.  </p>



<p>Mac    00:59:06    Um, and when I was thinking through some of the implications for this sort of neuroanatomical perspective, um, that, uh, that, uh, that I’ve been speaking about, one of the things that I think is a sort of intuitive way for people with that background in psychology to kind of make contact with this is to think of, to a first approximation, these kinds of cortico-cerebellar-cortical loops as a little bit like system one, like you have a particular action and you sort of jump to what’s to come next. Whereas the basal ganglia system is a little bit more system two. Like it’s sort of allowing you to focus on one particular part of the system, um, and, and really sort of drill down on it. Um, now where it gets really interesting is that, you know, I mentioned before that the basal ganglia has this sort of, um, contact with the matrix elements, right, which produces this diffuse projection to the cortex.  </p>



<p>Mac    00:59:50    Um, but what it does up there is not necessarily just sort of sprinkle activity everywhere. What it’s actually doing is it’s activating the apical dendrites of the, uh, these really massive layer five pyramidal neurons. And this is where the story really hit home, hit home for me. Um, there’s some really beautiful work. Matthew Larkum, uh, is sort of one of the pioneers of this, but there’s many others that have shown that in the cerebral cortex, the apical dendrites of these cortical pyramidal neurons are actually separated away from the cell body in a really fascinating way. It’s almost like they’ve been pulled apart. Like you’re sort of stretching a band to make them as far apart as possible. And what that means is that you can actually kind of do contextual processing on these. What you can have is the context that comes in to the apical dendrites changes the firing properties of the cell body, but only when it exceeds some threshold.  </p>



<p>Mac    01:00:38    So you need to have lots and lots and lots of inputs, or the neuromodulatory system has to come in and close a little leak channel that allows the system to now fire in a different kind of a firing mode. And so what that means is that the basal ganglia can increase the variability of the system, but still create the action of the system. It can still allow the winning coalition in quote unquote to fire and have an influence over the next brain state and to sort of enact that change. And I actually think this sort of actually comes back to the predictive processing story a little bit, one of the parts of the Rao and Ballard model that I find, um, a little bit counterintuitive is that if you get a good match, you just sort of don’t do anything. So if I expect your voice Paul, and then you speak and I get a match, I should just sort of, you know, get out of the way everything’s fine.  </p>



<p>Mac    01:01:25    But to me to be adaptive, that system needs to act on the thing that they guessed was there and actually was right. If I guess that a saber tooth tiger runs through the, through the door and it actually does. And I just sit here going, oh, good. I made a good prediction. You know, there we go. Don’t do anything I’m cooked. Right? So what I’d like to do is have a system that can enact that change and get moving. And the beauty of Matthew Larkum’s cellular model is it allows that prior that hits the apical dendrites and the evidence that comes in to the, the cell body to actually make a difference to cause the cell to burst fire, to then be the one that goes down to the cerebellum and says, what, what, what should I do about this to go down to the basal ganglia and say, what are my options right now?  </p>



<p>Mac    01:02:05    So it kind of bakes it in together in this way that you can see that the elements of the, of the Rao and Ballard model are undoubtedly related to the nervous system. You do look like you make priors and look at evidence and match them up. But maybe what we do with that information might be a little bit different than we originally intended. And we can think a little bit about what that might mean for some of our models and, and, uh, you know, again, this is a step in the direction where we need to do a lot more empirical work to really constrain these ideas. These are really theoretical concepts, but it’s an exciting time because I think we have the tools to start to do these experiments.  </p>



<p>Paul    01:02:38    So I’m going to ask you a question that, uh, I feel is in danger of, um, I’ll be, maybe you can laugh in my face because we’ve just been talking about how important the whole system is, right. And to consider the interactions of the system, because it’s really the emergent properties from that. That’s important. But what I’m going to ask is the bottom line of how to think about the thalamus then. So should we think about the basal ganglia as always going full, full bore and the cerebellum has always going full bore and the thalamus controlling them, or mediating them, or nudging them to change that dynamical landscape to put us in different regimes of action and thought, how do you think about that thalamus in that role?  </p>



<p>Mac    01:03:20    Yeah, that’s a really great question, Paul. Um, I, I think part of the problem is that I don’t think the thalamus is really just one thing at the end of the day. I think it’s in some sense, it is, it, it ha it has a, you know, a particular topology where there’s a bunch of glutamatergic cells that don’t contact one another a lot, and then a bunch of inhibitory cells that compete with each other from the RTN and, and it projects to the cortex and receives input. So in some sense, you can think about it under the single label, but in another sense, when we drill down into the details that that kind of unity starts to break apart. And so I think it really depends on the question you’re asking. Um, if we’re thinking about the core thalamic nuclei, I do think that the message passing story’s a good enough first approximation to think about it.  </p>



<p>Mac    01:04:06    Um, with the matrix thalamic nuclei, we like to think about it a little bit, like changing the brain state, um, kind of, uh, sort of increasing the excitability of a population, but that’s, again, a first approximation. There’s a lot of subtlety there. Um, one connection we haven’t talked about, which is, again, is systematically under studied in the literature, is this massive projection from the thalamus, this matrix cell population, very particular ones called the intralaminar nuclei, like the parafascicular nucleus and the centromedian. They actually project really strongly to the striatum, uh, and, and have a really strong gain influence over both the spiny projection neurons and the cholinergic interneurons. And that structure, that interaction is something that we really don’t have great empirical work on. There are people working on it. Um, but it’s, it’s an area that needs to evolve for us to really understand what’s going on.  </p>



<p>Mac    01:04:53    It’s a really important structure. So overall, I don’t think that there’s going to be sort of any one label, but, you know, th th the, the, the kind of picture you were painting just before in your summary, I think is a good way to think about it. Right? One thing we do know about the thalamus, um, is that it it’s, um, has stark changes between sleep and wake or between anesthesia and wake. So Mircea Steriade, um, really convincingly showed, you know, 20, 30 years ago, that if you look at the firing of a population of thalamic neurons during sleep, there, there’s a particular calcium channel that’s closed. And then when, when you wake up, it opens up, well, maybe I got that the wrong way around, it could be open and then close up, but it’s a switch. And then when that changes, you can then get a conformational change in the kinds of interactions that can occur.  </p>



<p>Mac    01:05:38    And all of a sudden you get this emergence of high-frequency desynchronized cortical EEG. Um, and so I think in a way what the thalamus is doing is really controlling the state of the system, right? And then any influence that happens to it all the time, whether it be a cortical influence, a basal ganglia influence, a cerebellar influence, a neuromodulatory influence, is going to shape and change the way that the state will change over time, which is one of the most crucial factors for determining how we do what we do. So I think of it as an absolutely core part of, of the central nervous system and, you know, really, you know, at a kind of big picture level, what I was trying to do with that paper, a lot of what I’m trying to do in my research program is to sort of shine light on these kinds of, uh, you know, areas of, of the nervous system that we haven’t really thought about from that kind of systems level perspective, as much to sort of show that they’re playing a really crucial role. Um, but you know, it’s going to be up to people like Michael Halassa that have all the, like really crazy optogenetic tools that let us really go in and, and, you know, nut out the details of these circuits that I think will actually really end up carrying the day.  </p>



<p>Paul    01:06:45    Okay. That was satisfying. Thanks for not laughing in my face. I mean, I, I think that I still have this, um, bias to think about a controller to think it was somewhat like of a homunculus right. That, uh, there’s these different states in the brain that they’re being switched. How are they being switched? There has to be a controller. Oh, maybe that’s the thalamus, but on the other hand, if you consider like the whole systems level processing, uh, it’s less satisfying to say self-organizing, but I don’t know. Do you think that, that we need to think about it in that respect as a self-organizing system that we’ll just have to accept it? Uh, not that that’s disappointing, it’s just, uh, I think people like me with a simple mind, tend to think in a homunculus fashion and think something’s in charge, you know?  </p>



<p>Mac    01:07:31    Yeah. Um, you know, uh, I, I don’t remember who said this quote, but if, if not, we should just give it to ourselves Paul, but biology is weirder than you could ever imagine. Right. Um, so I think this comes back to the fact that, um, I think once we embrace the, the use of neuroscience and we, we, we really kind of put to the side, any concern we have about trying to deeply understand the thing just yet, let’s just get heuristic approximations, and then we can go from there. Um, I think once you, once you take that perspective, you start to realize that making some, uh, discovery or having some hypothesis makes you make commitments to the shape of the system or the way it interacts in ways that you may not have anticipated. And, uh, the dynamical systems stance is one that I think was kind of inevitable when I started reading more about neuroanatomy, because you need that language to understand how these things are interacting with one another, because, like, the brain is a complex system that has lots of interacting parts.  </p>



<p>Mac    01:08:38    And the main thing they can kind of do is nudge one another, or kind of increase the excitability or the receptivity of each other over time. They, they can’t do anything about the past. Well they can do is about the near future. Um, and so in a way you need the dynamical systems language to kind of help you, uh, even frame the things that you’re seeing in, in, in sensible ways. And one of the things that I think comes with the dynamical systems language is this self organizing concepts and really weird things like there’s a term called circular causality. And I was talking about circular causality back in the day as like a slap on the wrist. Like, don’t do this because you’re making the wrong, you’re putting the cart before the horse. And you’re assuming your answer right? What it means in this language.  </p>



<p>Mac    01:09:19    Alicia Juarrero has a really beautiful book about this called Dynamics in Action. And what she argues is that there’s different kinds of causality happening and the bottom, you know, the, the kind of traditional billiard ball style causality that we really like to think about where one neuron contacts, another neuron, and then kicks a message onto another neuron and onto another neuron. And there’s like this little line of chain of command, kind of a thing, maybe true in some sense, but there’s also this whole other set of what we would call causal rules, where the constraint of the system, the top level configuration system changes what’s possible for the lower level. And this is a really kind of gnarly concept, but one, um, you know, really intuitive example that I think kind of helps to kind of, um, bring this home, from George Ellis, is that the kind of software that you’re running on your computer, whether it’s, you know, Microsoft Excel or, you know, um, Microsoft word, the same key stroke can have a completely different effect on the electrons running around in the hardware of your system.  </p>



<p>Mac    01:10:16    So you can have a constraint from the top, the program that’s running really changed the kind of way that the system can then evolve over time and the kind of way that the different billiard ball logic plays out. Um, and so, you know, I, I don’t pretend to be an expert on this stuff, but I’m reminded of, you know, like the episode you did with Mark Bickhard on interactivism. You’re thinking more about systems far from equilibrium, trying to figure out how to kind of navigate a complex world, take the affordances available to them, to, to solve ongoing problems that pull them a little bit further away from equilibrium than they’d like to be. And that kind of a system is going to have these really weird features of self-organization, and weird dynamical evolution over time, and this circular causality that we need to understand better if we want to describe them in the way that they are, rather than the way we would like them to be with our kind of traditional kind of A to B to C causal models.  </p>



<p>Paul    01:11:09    So thinking about your medical background and, and causality and causes and complexity, do you, what do you see as the prospects for being able to nudge these systems, um, in a therapeutic way and, or, you know, in a clinical way, right? Like, uh, are we close to being able to, like, let’s say your theory is completely correct. Right. Would you feel comfortable getting in there and pushing things around, uh, to treat, uh, people’s personalities?  </p>



<p>Mac    01:11:42    Um, yeah, this is a great question. And one that, um, I feel guilty about a lot, particularly when my mother reminds me that I, uh, left a job in medicine to work in academia. Um, my father was quite happy about it. Yeah. He, my father said, well, look, now you’re a real doctor when I got my PhD.  </p>



<p>Paul    01:11:58    Oh, wow. That’s the opposite of what my grandmother says.  </p>



<p>Mac    01:12:03    Um, my wife always teases me as well that, um, if I, if I get sick of academia, I can go back and work in medicine where I can actually make a real salary. Um, but  </p>



<p>Paul    01:12:13    Yeah, podcasting,  </p>



<p>Mac    01:12:15    Yeah, it’s, it’s a really hard problem. And I think it’s, it’s worth again, you mentioned that this is a, the first step in what I imagine is quite a long path towards finding out the nitty gritty details. Um, but yeah, I think I’m excited for the opportunities that will be available in the coming years for areas of medicine that have traditionally not had really kind of, um, uh, treatments with punch, right. We, one of our best solutions, if someone shows up into the clinic with a thought disorder is to just sort of block all the catecholamines in the brain. And, you know, they don’t really have a great life after that. They eat too much because they can’t control their appetite and they don’t really have any ability to be, um, quite creative or interact with people. But, you know, at least they’re not having hallucinations.  </p>



<p>Mac    01:13:05    Um, and so I look forward to trying to work out how we can come up with better solutions for these kinds of folks. And, and I don’t think, uh, at the end of the day, it’ll be just about, you know, stimulating the area at the right time. I think that a lot of what makes our nervous systems so fascinating. And again, it’s one of these problems it’s in the really too hard basket is that we really are agentic, uh, organisms. We, we do things we act in the world. And part of the challenge, I think is that with the nervous system, it’s like that on steroids. So if you give someone a treatment while that might set you off on the right track, it could just as easily be the thing that your nervous system works desperately to avoid. And it could be the, the, um, you know, the downregulation of particular types of receptors or something like that that ends up carrying the day and having conferred the clinical benefit rather than the primary treatment option.  </p>



<p>Mac    01:13:57    Um, you know, deep brain stimulation in Parkinson’s is another great example, stick an electrode in this part of your brain, turn it on with really high frequency. And all of a sudden things get better unless they don’t. And if they don’t, tweak it a bit. And we don’t really fully understand exactly why it gets better. And so I think any sophistication in our appreciation of the kind of workings of the nervous system at that systems level is going to lend itself towards better suggestions for therapy, but they’ll ultimately be about dynamic interventions rather than just like one-off gave you the tablet. Don’t worry. You’re good. Now, you know, in a way the immune system’s kind of coasting, right? You have a vaccination, uh, and then all of a sudden your immune system can kind of cope with the next insult. I think the nervous system has a lot of the elements of the immune system in it. It’s very variable. It’s really good at dealing with, uh, finding patterns in the environment, but it’s much more dynamic in a, in a, in a specific sense where you can’t just kind of like drop a rock in the pond and then hope that that solves the problem. And I think it’s going to be much more dynamic in the future.  </p>



<p>Paul    01:15:02    So Mack, you, um, spent a lot of time learning anatomy and the connections and, um, thinking about, uh, structures besides cortex, for whatever reason. Uh, and then you weren’t satisfied enough. So thinking about the whole system, now you’re bringing in neuromodulators and neuro-transmitters into how they interact at the systems level. First of all, what the hell is wrong with you, man? And secondly, uh, what’s, what’s the story there what’s going on with, with, uh, your current work on, on the neuromodulator neuromodulatory system?  </p>



<p>Mac    01:15:34    Yeah, so th there’s a, a kind of another fun story, uh, to kind of unpack the, um, so during my post-doc, I was working with Russ Poldrack, uh, at Stanford on, you know, functional MRI, um, and trying to think about it from the systems perspective. So when I started working with Russ, the kind of really hot topic in the field was what we called dynamic functional connectivity. And there’s a lot to unpack in that. And none of the terms really quite, uh, capture what they’re supposed to capture, but the concept was something like, instead of looking at the correlation between two blood flow time series over, let’s say a 10 minute window, let’s unpack it into smaller windows and then see what happens over those smaller windows. Let’s see if there’s any fluctuations such that there’s not a sort of stationarity to the system, but rather this kind of interesting dynamics and the tack that we were taking was to borrow a really beautiful idea from Guimerà and Amaral, who did this really lovely analysis of networks.  </p>



<p>Mac    01:16:31    They were metabolic networks, in their 2005 paper, um, where they basically took a metabolic network co-expression of particular, um, metabolic byproducts. And then they looked at the interaction between all of those as a network. And then they said, well, this is a really hard thing to describe. It really not only depends on how you look at it. It’s really multi-dimensional. Maybe if we summarize it into a bunch of little communities, we’ll run some kind of a clustering on it to find tight little, little communities or what we call modules. And then when we have that information, let’s start to ask, well, relative to that modular breakdown, how each of the different metabolites kind of related to the whole system. And it turns out that one of the, the framing that they use at least is called a cartographic framing. So you calculate something like the between-module connection, how much was an individual metabolic, uh, signature, like the rest of the system versus its own little group, and a local one, which we call the module degree z-score.  </p>



<p>Mac    01:17:21    That’s basically telling you how much, like you were your little group, you were a module. Um, and what we were trying to do basically, was to take their framing and to put it onto these little dynamic networks, quote, unquote, that we were measuring, right? So we did, someone’s lying in the scanner. You get a bunch of data, you break it up into little chunks, you calculate one of these networks and you look at the configuration over time. And one of the, you know, through a long, long process, that involved really great questions. I mean, one of the things that I love about Ross and his group is that there’s a real kind of poignancy of really getting to the bottom of problems and trying to kind of not fool yourself, uh, which I think is really, really easy to do in a, in a big space like this.  </p>



<p>Mac    01:17:59    Um, and in fact, it was two of his grad students in a lab presentation that pointed out that we had brought along this, uh, old, um, way that they chopped up the data in the original experiment. They basically like set boundaries and then CA um, characterized parts of this little space into different bins. And we were just tracking them over time. And the grad students both said, why don’t you just get rid of those bins, just look at the thing over time. And so we made this little histogram at joint histogram and then watched a movie of it. And it was just like, you know, a ton of bricks. There’s this big, massive fluctuation over time, uh, between these extremes of a really interconnected system with lots and lots of those with between connections. And then it really isolated system with these little, these little within connections, and we call those an integrated and a segregated network.  </p>



<p>Mac    01:18:43    Um, uh, and so that was amazing, right? Wow. We found this really interesting thing. And then like, well, what does it mean that we were stuck with this really hard problem, right? It was like, all right, back to the literature, you know, it was like a common theme here, right? You, you find a thing and then you go, what could it be? And then you spend a bunch of time, you know, it’s important for the students to realize this, you spend a bunch of time meandering around the world, reading a cool paper, being inspired by some weird question that your friend asks you or your dad asks you about your son standing up effortlessly. These things often strike you when you least expect them. But I think they’re an underappreciated aspect of, of science, or at least the part of science that I really love of that kind of wallowing in your uncertainty until it resolves itself, I think is one of my favorite parts.  </p>



<p>Mac    01:19:29    Um, and so we spent a long time, I remember a bunch of the postdocs in the lab being like, dude, when are you going to like, work out what this is? This is kind of getting boring, we’re, we’re tired of you talking about integration, segregation everywhere. You see it. Um, and, uh, so Russ and I decided let’s go meet a bunch of different professors around Stanford. This is again, one of the benefits of being in a place like Stanford is you just walk down the road and you’ve like, you’re meeting up with like a brilliant economist or a brilliant information theoretician. Um, and, uh, it was actually an economist that, that, that put us on the right track. We were talking to Matthew Jackson, he’s done a lot of really interesting work on networks, uh, in, in, uh, in economics. And I asked him, you know, Matthew, are there any parts, any ways in, in economics that you can cause the system to kind of do a reconfiguration or change like we’re seeing, could you like, you know, uh, remove taxes.  </p>



<p>Mac    01:20:17    And then all of a sudden everyone goes out and spends money and they all look like one another, or could you like, you know, boost people over here with a little bit of money and if you boost it just the right people trickle down, economics might happen in the system. Might, you know, if that was a real thing, um, the system might change. Um, and, uh, and then he said, no. And I was like, oh man, I really wish I had a great answer for this. But then he said, surely there are parts of the brain that, you know, they don’t have to be big, they can just sort of project kind of to the rest of the brain. And then they could kind of change how the different parts interact. And honestly, it felt like the old Monty Python sketch where someone gets slapped in the face with a fish, I was like, oh my God, of course, it’s the ascending arousal system.  </p>



<p>Mac    01:20:56    Right? Of course that’s, what’s doing this. And so we then went and did some empirical tests and did some computational modeling with Michael Breakspear. And I’ve now, you know, I’m working on doing energy landscape analysis with my extremely brilliant, um, postdocs from physics and desperately trying to understand what they’re doing, uh, to try to work out what is the best way to kind of analyze the system, but at its core, I think the ascending arousal system is just so important for shaping the dynamics of the rest of the system. And so, um, you know, we probably don’t have time to kind of get into this properly, but, um, the kind of cliff notes are that instead of the, um, the system using glutamate and GABA as its main neurotransmitters, which we kind of think of as, you know, either starting off an action potential or quashing one, um, the main, uh, effectors in the system are more like hormonal-like structures that, um, uh, actually, uh, are often derived from amino acids. So actually this is the point for my, um, my dad joke, Paul, um, what, uh, how do you know that, um, neuromodulators are, uh, really rude?  </p>



<p>Mac    01:22:09    Cause they’re a-mean-o acids.  </p>



<p>Paul    01:22:13    Oh man, thank God you did the dad joke.  </p>



<p>Mac    01:22:19    Um, I’m a dad, I’m allowed to make dad jokes. So yeah. So you’ve got these, um, these really highly conserved systems in your brainstem and forebrain that take in amino acids from your diet, or they use the byproducts of the Krebs cycle, uh, and they convert them into little intermediaries that then go off and have little, they have receptors that act like a kind of little lock and key mechanism that are called G protein coupled receptors. And the G protein coupled receptors are really different than the AMPA and the NMDA receptors that we typically think of rather than letting ions sort of shuttle into the cell in particular ways, what they do is they create conformational change in the internal state so they can release calcium, they can open voltage-gated ion channels. They can also do all kinds of cool stuff. They can act like transcription factors.  </p>



<p>Mac    01:23:04    In fact, there were a couple of recent papers where they showed that molecules like serotonin and dopamine actually can bind to the DNA like epigenetic modification and can change the likelihood of an animal ultimately recovering or not from an addiction. Do they treat, I mean, it’s, it’s mind blowing stuff. Um, and when, so basically this system has a really different effect on the nervous system than the traditional kind of glutamatergic, GABAergic, um, effects. And so one of the things that we do a lot in my lab is try to think about what the different subtle differences amongst those different neuromodulatory systems, how they interact, what it would mean to release acetylcholine here, but noradrenaline there, you know, what is serotonin doing? And does it oppose dopamine or does it actually work with a, we, we try to ask these kinds of questions about the neuromodulatory system in its details and how it interacts with the rest of the nervous system. Because again, it’s going to have those implications for the state changes over time, the flattening or the deepening of the attractor landscape, or the ability for this coalition of neurons to form and then be alive and around long enough to interact with another system that you need in order to solve the complex problem in front of you. So I think of it as quite quite important for those systems level things.  </p>



<p>Paul    01:24:12    So again, uh, so I’m assuming the thalamus controls all this right now. I’m just kidding. But again, we have to think in terms of self-organization and complexity or, or, you know, at this point, actually, you know, thinking about the arousal system, we also have to think about life processes and metabolic processes, right. Which is a part of the whole integrated system.  </p>



<p>Mac    01:24:35    It’s part of the fun, right? Yeah. So these, so these, these systems are, um, you know, under massive feedback control from both themselves as well as a number of other structures. The hypothalamus is the, the key, the key one, but other areas like the habenula really important, the periaqueductal gray. Um, and then, you know, there’s also, you know, cortical projections to these systems, um, that, or, you know, uh, basal ganglia in the case of dopamine and serotonin. So these systems that you can think about them as sort of having this massive conformational change in different arousal states. So for example, um, I talked before about how the thalamus wakes up, uh, uh, when you wake up, um, a lot of that is due to acetylcholine that comes up from the laterodorsal tegmentum in the, in the brainstem that kind of kicks off that conformational change; noradrenaline plays a role as well.  </p>



<p>Mac    01:25:28    Um, and so you could think about these things as, as having this massive change that changes the arousal state, but then subtle fluctuations in those neuromodulators can also change what you can do right now. So one of my favorite examples from the literature, it comes from Susan Sara and Sebastien Bouret, and they talk about what they call the network reset phenomenon with noradrenaline or norepinephrine for your north American listeners. And the idea here is that, um, if you, if you imagine a widespread system that can change the receptivity of the system, what we call the gain of the system that can kind of make it more likely for a spike that comes in to get propagated, to, to have some meaningful output. Um, one way that that can be really beneficial for an organism is if you’re, if you’re sitting in the, in the scrub, you know, looking for bugs, you’re a little marsupial, and then you hear a rustle in the bushes, if you’re too zoned in and focused on your meal, you won’t notice that a predator has snuck up on you.  </p>



<p>Mac    01:26:20    It’s going to come and jump in at you. Whereas if you have that big burst of noradrenaline, now, all of a sudden, whatever it is, actually in your environment, rather than what you want to be in your environment, the food and exploiting that food. Now, all of a sudden the system is now susceptible to whatever the most salient signal is or the most, um, uh, most, um, important for your adaptive, ongoing life, um, can then carry the day and you can react to that new, um, stimulus rather than the one that you were reacting to in the moment. And I think this kind of flexibility is, is kind of informative for the kind of more adaptive stories that we need to be thinking about, to understand how a nervous system could benefit an organism over massive swaths of evolutionary time. Like if you don’t have that system in there, if you don’t have a system in there that can help you focus down on something like we think maybe acetylcholine helps with or something that might help you work out what’s valuable or not in a really complex kind of, uh, sort of space or a temporally extended landscape that we live in, we think dopamine might help with, and you’re not going to act in the most adaptive way you possibly can.  </p>



<p>Mac    01:27:20    And so I, as someone who’s really interested in that phylogenetic perspective, I think neuromodulators play an extremely crucial role in that process. Um, and also, you know, um, a massive role in pathology in both the developing brain, as well as in neurodegeneration, uh, and a bunch of psychiatric conditions. So I think there’s, there’s a lot more to be learned about this space, uh, that I think would really help us to be thinking about that sort of systems level interaction as it plays out.  </p>



<p>Paul    01:27:48    So Mack, I don’t normally have such a, a systems neuroscience, heavy conversation on the podcast, and you’ve taken us on quite a tour, um, over different brain regions and how they interact and the function and the anatomy. And now the neuromodulators, uh, the, the portion of my podcast audience who is in the AI research world and or industry, uh, is probably feeling helpless and lost, let alone many neuroscientists. Right. So what I want to ask you is what do you see when you look at a deep learning network or, and, or like a reinforcement learning system or a deep reinforcement learning system, how do you think about the modern, uh, AI approach?  </p>



<p>Mac    01:28:32    Yeah. Um, I apologize if, uh, if the content of the podcast has been to,  </p>



<p>Paul    01:28:38    Um, people people need to get, yeah. People need to get slapped upside the head with some facts and some theory. Yeah.  </p>



<p>Mac    01:28:46    Yeah. There, there are pretty pictures in the papers if you, if you’d like to see some of them laid out a little bit. Um, so yeah, look, um, you know, I think AI is a, is a fascinating field. Um, and you know, I think I sort of sympathize with some of the people you’ve had on podcasts who think of it as almost slightly orthogonal to kind of neuroscience in a way, and that it it’s sort of grown into its own fascinating space with lots of idiosyncrasies that are, uh, not particularly inspired by the brain, but don’t need to be, they just sort of are the fact that it works in the way that it does. So in, you know, in my, in my day-to-day. So I think of them as sort of orthogonal. Um, I do see there being really great opportunities for communication between those, uh, those two spaces, you know, um, Blake Richards’ work is something I absolutely love where he’s thinking about those layer five pyramidal neurons, as I was talking about before, and credit assignment, and trying to think about how you could have, you know, messages pass up the hierarchy, but then also learn which ones I should reinforce and increase the connection strengths between which ones I shouldn’t.  </p>



<p>Mac    01:29:48    And I absolutely love all that work, and it’s not something that I, uh, I have a lot of experience in. I love reading that literature and thinking about how to kind of, um, integrate the ideas.  </p>



<p>Paul    01:30:00    But what about the idea of, so I’m thinking about deep reinforcement learning in particular and heavy on the reinforcement learning idea here, you know, you have people like David silver, uh, and his colleagues writing a paper. I think it was called reward is enough, but essentially the claim is that all we need is reward in a reinforcement learning or deep reinforcement learning system that is going to lead us to AGI, right? So it’s not just tools that AI is after there. Uh, there’s a certain sector, uh, that is optimistic and interested in building quote unquote, true intelligence, whatever the hell that is. But, um, do you see that as orthogonal or do you see like a reward system as in enough?  </p>



<p>Mac    01:30:50    Um, yeah, it’s a great question. I’ll have to have to read the paper. I haven’t haven’t come across it. Um, I I’d come back to something that I think Blake Richards actually said on your podcast way back, which, which is this lovely analogy of if all you had was a neuromodulatory system to guide you, it would be a little bit like playing a billion dimensional game of hot or cold. I really loved that analogy that really, it really kind of, it really kind of knocked me for a loop when, when he said it. And I think it’s really a really profound way to think about, uh, the problem, the computational problem of just having an arousal system. But when I started thinking about that a little bit, um, this actually was, uh, you know, before I’d written the paper that we’ve talked about and originally the paper we talked about actually had a whole other section on the arousal system, but, um, it got, so, um, I don’t know how much swearing I’m allowed to do on your podcast, but it got absolutely destroyed by people because they just, they didn’t like it and whatever I’ll move on, but  </p>



<p>Paul    01:31:52    He got fucked by people.  </p>



<p>Mac    01:31:55    No, there was actually worse than that. Yeah. Um, it’s, you know, it’s, it’s really early in the morning in Australia, I’ll try and keep my language. Um, so, um, yeah, so I ended up taking it out and it’s been, it’s formed the basis of multiple other small things that we’ve done on the side. But one of the things that I think could help you solve that multi-dimensional, um, hot or cold problem is if the system that is processing the information, quote unquote, uh, could non-linearly interact with the arousal system, right? So it’s not now, it’s, you’re not now just following the dopamine gradient or following the noradrenaline gradient or the, what you’re saying is if the noradrenaline gradient is high, or if you get a phasic burst of dopamine, whatever is active at that time, notice it and take advantage of it. Use that window.  </p>



<p>Mac    01:32:41    You just had to, to like turn it into burst firing neurons, to create some long term potentiation. And all of a sudden you’re not playing billion dimensional, uh, hot or cold anymore. What you’re doing is you’re actually playing the game, but, you know, with a little cheat code, which is that if you get even close to something, you can be like, oh wait, forget about everything else. Now I know I’m in the right location. And then you play it again, billion dimensional, uh, you know, hot or cold, but only in that one little pocket. And then you get a little bit closer and then you do it again, what’s that? That’s gradient descent, right? Like that’s kind of, you know, uh, an algorithm that we know works really, really well for a lot of these learning, um, situations. So, um, I’m really eager to see, you know, the much more clever people, uh, than I going into this space and trying to figure out how these systems work together. Um, I think it’s a really interesting time to be a neuroscientist and to try to apply these tools of dynamical systems, but also try to make them make contact with learning. And if anyone wants to talk, you know, hit me up, I’m on Twitter, we can chat,  </p>



<p>Paul    01:33:41    Uh, really excited and optimistic. And I feel happy for you because my take is that you feel like you’re in a great position in your career and that the future is wide open. Do I read you correctly there?  </p>



<p>Mac    01:33:58    Yeah, I think that’s fair. I I’ve been incredibly fortunate, Paul. Um, you know, I had a really great PhD supervisor who supported me. I then got a fellowship to go over and do research over there with a world leader in fMRI who then put me in contact with a world leader in computational modeling and then came back to Australia and I’ve got another research-only position where I can spend time to think and read. Um, I live a little bit further away from campus, uh, on the central coast of, of Australia, which has this beautiful, quiet area, lovely beaches, lovely bush walks and an extremely patient and supportive wife that, uh, you know, allows me to kind of wander around in my head all the day. And so, and, and just a brilliant team around me, great collaborators, lovely, really collegiate and really sort of impassioned young scientists that really pushed me and forced me to kind of stay on my toes the whole time. So I feel incredibly fortunate to, to be in this position. And so my optimism, I think, comes from a just complete lack of understanding of how I’ve found myself in the position that I found myself in, and I want to do as much good work as I can, and I want to be as engaged with the science as I can. Um, but it’s, it’s really just dumb luck to be perfectly honest.  </p>



<p>Paul    01:35:12    Well. Okay. So there’s a lot to unpack there, um, because sure, luck, I, I think luck is a huge factor and that’s wonderful that you acknowledged that. Uh, but it also, you know, as you said before, we are agents, you have to make those decisions, right? So, uh, it’s not dumb luck. It’s more like serendipity and part of your serendipity, I would imagine arises because of your work ethic, but also your range of interests. And what I’m getting at is that the, the nature of your work and its range, uh, it really takes so much effort, uh, to what, what was your phrase, wander in your own, um, ignorance? Wandering? What was the phrase?  </p>



<p>Mac    01:35:57    It was something like  </p>



<p>Paul    01:35:58    Wandering your own curiosity. Yeah. I say ignorance because I’m more of a pessimist, I suppose, but so, so you, you have to like swim in these disparate facts, right. That, uh, in, in the unknown and that, it’s awesome. It’s awesome. But it also takes a lot of work. Right. And focus. So here, so this is what I’m getting at. Uh, how do you confer that ability to other people? Like what did, what the question is? What advice would you give to someone who is interested in trying to think across scales, across temporal scales, across, uh, physical scales, across systems, inter system, uh, with the brain and think holistically, somewhat like you do. Um, you know, not everyone is suited nor, um, would want to do something like this because it’s, it’s nice to just focus on one little brain area and what it might be doing, but do you, do you have advice to your incoming students and to those that you talk to who are kind of wowed by this kind of holistic understanding you possess?  </p>



<p>Mac    01:37:04    Uh, I, you can’t see this on the podcast, but I’m blushing, um, as well. Um, I played football for a number of years, um, back when I was younger and could still move without feeling like a creaky old, uh, kind of chair that’s been left out in the rain for a few months. Um, and my team was really good and, uh, we won a bunch of championships and we had a lot of fun. We had a really great tight-knit group, but I was never the best player I was always on a good team, but I had to rely on this guy to make this play on this other guy to make that play. And I think that had a huge, a huge effect on me. I think it hit me really hard back in those days that if you want to achieve something, uh, that’s really big and, and requires that, um, coordination, you’ve got to be able to be a part of a team and you’ve got to make that really integral in what you do.  </p>



<p>Mac    01:38:02    And I’ve carried that through with me and I’ve tried really hard to, you can maybe talk to my students and collaborators on the side and see if it’s reflected in the feedback. But I consider that to be absolutely crucial is that I’m not doing anything on my own. Everything that I do is about the team that I’m in and the, and the group of scientists that I’m fortunate enough to work with. Um, so that I think is one part. And I think another thing that comes with that perspective is that you should never ever feel like all the pressure’s on you to do everything. Um, you, I think if, if you follow your curiosity, if you are dissatisfied with answers that don’t resonate with what makes sense to you then I think you naturally play that little game of gradient descent across the landscape in a really fun way.  </p>



<p>Mac    01:38:51    And as, as you said, Paul, it doesn’t always make sense. Um, and there are parts that are really frustrating and there are gaps in the literature that, that you so desperately want filled, but aren’t going to be filled for practical reasons. Um, as a, you know, uh, curiosity driven sort of scientist, uh, at a systems level, those gaps are everywhere. And so I think you have to kind of make a lot of, um, guesses. You have to do a lot of hedging, and I think you have to kind of use the soundboard of your collaborative group and, and yourself really to try to kind of work out where the solid bits of ground are and where the parts that are a little bit more flimsy and you have to be willing to kind of live in that murkiness. There’s a lot of benefits, but, but you know, it’s, it’s not, it’s not the kind of thing where you kind of, um, you want to kind of like launch out on that stuff when you haven’t got a position that’s, you know, relatively solid, you know, I’ve only been able to do stuff like this because I would get a, you know, a fellowship based on some, you know, some work.  </p>



<p>Mac    01:39:48    And then I would have a few years of stability where I could say, okay, now I’ve got the flexibility to go out and explore a little bit and I can try this and that and the other. And, and, you know, there’s definitely points in this story where, you know, after getting a really bad rejection, uh, you know, you think, man, did I just waste all this time? Like, what have I done wrong? What have I missed? But then you pick yourself up and you go, okay, I’m going to take the parts of the work. It’s a little bit like, uh, my kids will be watching the Marvel movies and it’s a little bit like at the end of the, all the Iron Man ones, where he like takes the thing he had, he like throws off like half of the stuff. And then he starts with like the little bit, and then he builds it up.  </p>



<p>Mac    01:40:20    I think that’s an underrated part of trying to like navigate science as well as to like, they call it, you know, don’t be afraid to kill your darlings. You’ve gotta be really willing to kind of depart with things that were useful, but then ultimately don’t work. And I think, again, this is where it comes back to being part of something bigger than yourself being part of a team and not feeling like if your research question ultimately is a dead end, that it’s your fault or something. A scientist to me is someone who’s out there trying to discover something about the world to understand it a little bit better. And it comes in many different forms. It could be done at the patch clamp, trying to work out how that particular cell worked, or it could be up at the broad level asking about ecological interactions in a complex ecosystem, anywhere in between, you can apply scientific thinking. It’s a process, not a set of facts. And so to me, I, you know, I just feel so fortunate to be a part of this process. It’s a lovely job to have, and I feel like a, an absolute moron most days out of the week, uh, based on how much uncertainty I have over everything. But when you catch those little bits of insight, when the puzzle pieces align, when the little idea clicks to you, oh man, it’s, it makes it so worthwhile.  </p>



<p>Paul    01:41:29    Well, it really comes through that. Uh, you feel fortunate and excited. Do you know where the phrase kill your darlings comes from?  </p>



<p>Mac    01:41:38    I do not actually.  </p>



<p>Paul    01:41:39    Oh, it’s Stephen King in reference to heavy handed editing and how, uh, how appreciative, uh, how beneficial that is. So, uh, Mac, thank you so much. Um, it’s been really fun having you finally come on the show. I’m glad we finally got you on and continue the great work, man.  </p>



<p>Mac    01:41:55    Thanks, Paul  </p>



<p>Paul    01:42:02    Brain inspired is a production of me and you. I don’t do advertisements. You can support the show through Patreon for a trifling amount and get access to the full versions of all the episodes. Plus bonus episodes that focus more on the cultural side, but still have science. Go to braininspired.co and find the red Patreon button there. To get in touch with me, email paul@braininspired.co. The music you hear is by The New Year. Find them at thenewyear.net. Thank you for your support. See you next time.  </p>

</div></div>


<p>0:00 – Intro<br />6:32 – Background<br />10:41 – Holistic approach<br />18:19 – Importance of thalamus<br />35:19 – Thalamus circuitry<br />40:30 – Cerebellum<br />46:15 – Predictive processing<br />49:32 – Brain as dynamical attractor landscape<br />56:48 – System 1 and system 2<br />1:02:38 – How to think about the thalamus<br />1:06:45 – Causality in complex systems<br />1:11:09 – Clinical applications<br />1:15:02 – Ascending arousal system and neuromodulators<br />1:27:48 – Implications for AI<br />1:33:40 – Career serendipity<br />1:35:12 – Advice</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/4c49c06f-e900-4619-8fc2-989964f1310b-121-Mac-Shine-public.mp3" length="99378709"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Mac and I discuss his systems level approach to understanding brains, and his theoretical work suggesting important roles for the thalamus, basal ganglia, and cerebellum, shifting the dynamical landscape of brain function within varying behavioral contexts. We also discuss his recent interest in the ascending arousal system and neuromodulators. Mac thinks the neocortex has been the sole focus of too much neuroscience research, and that the subcortical brain regions and circuits have a much larger role underlying our intelligence.



Shine LabTwitter: @jmacshineRelated papersThe thalamus integrates the macrosystems of the brain to facilitate complex, adaptive brain network dynamics.Computational models link cellular mechanisms of neuromodulation to large-scale neural dynamics.


Transcript

Mac    00:00:04    I was like, oh my God, around the corner in this place that everyone likes to forget. And if you tell most neuroscientists about it, that aren’t studying it, they’ll yawn and pretend like they’ve got some other problem to solve, but come on, man. The cerebellum is just beautiful in a way what the thalamus is doing is really controlling the state and then any influence that happens to it over time, whether it be a cortical influence, a basal ganglia influence, a cerebellar influence, a neuromodulatory influence is going to shape and change the way that the state will change over time, which is one of the most crucial factors for determining how we do what we do. These things often strike you when you least expect them. But I think they’re an underappreciated aspect of, of science or at least the part of science that I really love — that kind of wallowing in your uncertainty until it resolves itself, I think, is one of my favorite parts.  



Speaker 0    00:01:00    This is brain inspired.  



Paul    00:01:13    Hey everyone, it’s Paul. On this episode, I bring you an appreciation for the detailed nitty gritty work being done in systems neurobiology that highlights its importance in understanding the big picture functioning of our brains. Mac Shine runs the Shine Lab at the University of Sydney in Australia, focused largely on how systems neurobiology can help us understand our cognition. We talk about a pretty wide range of topics, all of which dance around systems neurobiology, which is on the whole, what Mac focuses on, but that is a vast range of topics. One of the main things we discuss is the role of sub-cortical brain areas that don’t get nearly as much attention as the neocortex gets, especially in the neuro AI world, where AI tries to glean some inspiration from brains, but work like Mac’s theoretical work that we discuss hopefully will change that cortico-centric bias many of us have.  



Paul    00:02:10    The main thing we discuss is the role of the thalamus mediating communication among the basal ganglia, the cer...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:43:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 120 James Fitzgerald, Andrew Saxe, Weinan Sun: Optimizing Memories]]>
                </title>
                <pubDate>Sun, 21 Nov 2021 21:47:33 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-120-james-fitzgerald-andrew-saxe-weinan-sun-optimizing-memories</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-120-james-fitzgerald-andrew-saxe-weinan-sun-optimizing-memories</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/11/art-120-01.jpg" alt="" class="wp-image-1602" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/11/weinanhead-150x150.jpeg" alt="" class="wp-image-1604" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/11/jameshead-150x150.jpeg" alt="" class="wp-image-1603" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2019/11/saxe_portrait_250-150x150.jpg" alt="" class="wp-image-906" /></li></ul>



<p>James, Andrew, and Weinan discuss their recent theory about how the brain might use complementary learning systems to optimize our memories. The idea is that our hippocampus creates our episodic memories for individual events, full of particular details. And through a complementary process, slowly consolidates those memories within our neocortex through mechanisms like hippocampal replay. The new idea in their work suggests a way for the consolidated cortical memory to become optimized for generalization, something humans are known to be capable of but deep learning has yet to build. We discuss what their theory predicts about how the “correct” process depends on how much noise and variability there is in the learning environment, how their model solves this, and how it relates to our brain and behavior.</p>



<ul><li>James’ <a href="https://www.janelia.org/people/james-fitzgerald">Janelia page</a>.</li><li>Weinan’s <a href="https://www.janelia.org/people/weinan-sun">Janelia page</a>.</li><li>Andrew’s <a href="https://www.saxelab.org/people/andrewsaxe/">website</a>.</li><li>Twitter: <ul><li>Andrew: <a href="https://twitter.com/SaxeLab">@SaxeLab</a></li><li>Weinan: <a href="https://twitter.com/sunw37">@sunw37</a></li></ul></li><li>Paper we discuss:<ul><li><a href="https://www.biorxiv.org/content/10.1101/2021.10.13.463791v1">Organizing memories for generalization in complementary learning systems</a>.</li></ul></li><li>Andrew’s previous episode: <a href="https://braininspired.co/podcast/52/">BI 052 Andrew Saxe: Deep Learning Theory</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Andrew    00:00:04    I guess the jumping off point is this long running debate about where memories are stored in the brain. And it’s a profound question is, you know, something that’s, people have struggled with for many decades at this point.  </p>



<p>Weinan    00:00:17    Like this is really give me that insight on, you know, why a lot of episodic memories are being kept in the hippocampus and require the hippocampus. It’s just a lot of it is because the world is so complex.  </p>



<p>James    00:00:32    I definitely think that, you know, we wouldn’t have gotten to where we currently are in AI without past generations of theoretical neuroscience research. And I also definitely think that projects like this, where we try to kind of boil it down to the essentials and really analyze everything very rigorously and really try to figure out to what extent is relates to the biological brain will provide useful seeds for future AI research.  </p>



<p>Speaker 0    00:01:01    This is brain inspired.  </p>



<p>Paul    00:01:14    Hey everyone, it’s Paul today. I have three fine folks on the podcast. James Fitzgerald, Andrew Saxe, and Weinan Sun. James and Weinan are both at the Janelia research campus...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











James, Andrew, and Weinan discuss their recent theory about how the brain might use complementary learning systems to optimize our memories. The idea is that our hippocampus creates our episodic memories for individual events, full of particular details. And through a complementary process, slowly consolidates those memories within our neocortex through mechanisms like hippocampal replay. The new idea in their work suggests a way for the consolidated cortical memory to become optimized for generalization, something humans are known to be capable of but deep learning has yet to build. We discuss what their theory predicts about how the “correct” process depends on how much noise and variability there is in the learning environment, how their model solves this, and how it relates to our brain and behavior.



James’ Janelia page.Weinan’s Janelia page.Andrew’s website.Twitter: Andrew: @SaxeLabWeinan: @sunw37Paper we discuss:Organizing memories for generalization in complementary learning systems.Andrew’s previous episode: BI 052 Andrew Saxe: Deep Learning Theory


Transcript

Andrew    00:00:04    I guess the jumping off point is this long running debate about where memories are stored in the brain. And it’s a profound question is, you know, something that’s, people have struggled with for many decades at this point.  



Weinan    00:00:17    Like this is really give me that insight on, you know, why a lot of episodic memories are being kept in the hippocampus and require the hippocampus. It’s just a lot of it is because the world is so complex.  



James    00:00:32    I definitely think that, you know, we wouldn’t have gotten to where we currently are in AI without past generations of theoretical neuroscience research. And I also definitely think that projects like this, where we try to kind of boil it down to the essentials and really analyze everything very rigorously and really try to figure out to what extent is relates to the biological brain will provide useful seeds for future AI research.  



Speaker 0    00:01:01    This is brain inspired.  



Paul    00:01:14    Hey everyone, it’s Paul today. I have three fine folks on the podcast. James Fitzgerald, Andrew Saxe, and Weinan Sun. James and Weinan are both at the Janelia research campus...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 120 James Fitzgerald, Andrew Saxe, Weinan Sun: Optimizing Memories]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/11/art-120-01.jpg" alt="" class="wp-image-1602" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/11/weinanhead-150x150.jpeg" alt="" class="wp-image-1604" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/11/jameshead-150x150.jpeg" alt="" class="wp-image-1603" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2019/11/saxe_portrait_250-150x150.jpg" alt="" class="wp-image-906" /></li></ul>



<p>James, Andrew, and Weinan discuss their recent theory about how the brain might use complementary learning systems to optimize our memories. The idea is that our hippocampus creates our episodic memories for individual events, full of particular details. And through a complementary process, slowly consolidates those memories within our neocortex through mechanisms like hippocampal replay. The new idea in their work suggests a way for the consolidated cortical memory to become optimized for generalization, something humans are known to be capable of but deep learning has yet to build. We discuss what their theory predicts about how the “correct” process depends on how much noise and variability there is in the learning environment, how their model solves this, and how it relates to our brain and behavior.</p>



<ul><li>James’ <a href="https://www.janelia.org/people/james-fitzgerald">Janelia page</a>.</li><li>Weinan’s <a href="https://www.janelia.org/people/weinan-sun">Janelia page</a>.</li><li>Andrew’s <a href="https://www.saxelab.org/people/andrewsaxe/">website</a>.</li><li>Twitter: <ul><li>Andrew: <a href="https://twitter.com/SaxeLab">@SaxeLab</a></li><li>Weinan: <a href="https://twitter.com/sunw37">@sunw37</a></li></ul></li><li>Paper we discuss:<ul><li><a href="https://www.biorxiv.org/content/10.1101/2021.10.13.463791v1">Organizing memories for generalization in complementary learning systems</a>.</li></ul></li><li>Andrew’s previous episode: <a href="https://braininspired.co/podcast/52/">BI 052 Andrew Saxe: Deep Learning Theory</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Andrew    00:00:04    I guess the jumping off point is this long running debate about where memories are stored in the brain. And it’s a profound question is, you know, something that’s, people have struggled with for many decades at this point.  </p>



<p>Weinan    00:00:17    Like this is really give me that insight on, you know, why a lot of episodic memories are being kept in the hippocampus and require the hippocampus. It’s just a lot of it is because the world is so complex.  </p>



<p>James    00:00:32    I definitely think that, you know, we wouldn’t have gotten to where we currently are in AI without past generations of theoretical neuroscience research. And I also definitely think that projects like this, where we try to kind of boil it down to the essentials and really analyze everything very rigorously and really try to figure out to what extent it relates to the biological brain will provide useful seeds for future AI research.  </p>



<p>Speaker 0    00:01:01    This is brain inspired.  </p>



<p>Paul    00:01:14    Hey everyone, it’s Paul today. I have three fine folks on the podcast: James Fitzgerald, Andrew Saxe, and Weinan Sun. James and Weinan are both at the Janelia research campus at the Howard Hughes medical Institute. James is a group leader and Weinan is a research scientist in Nelson Spruston’s lab. And Andrew is a joint group leader at University College London. Andrew’s been on the podcast before, when we discussed his work on deep learning theory on episode 52. And you’ll learn more about what James and Weinan do, uh, in a moment. The reason I’m speaking with them today is their recent theoretical neuroscience preprint with the title Organizing Memories for Generalization in Complementary Learning Systems. So we’ve discussed complementary learning systems a few times on the podcast before. The general idea is that we have a fast learning system in the hippocampus that rapidly encodes specific memories.  </p>



<p>Paul    00:02:13    And we have a slower learning system in our neocortex where over time and through mechanisms like replay from the hippocampus memories get consolidated. So in their new paper, they build on complementary learning systems and suggest that these two learning and memory systems might work better together in a way that optimizes generalization, which is a good thing. If you want to function well in our topsy turvy world. And one of the big takeaways is that how much consolidation should happen from hippocampus to cortex depends on how predictable the experiences are that your brain is trying to remember. So in unpredictable environments, you want to cut off the consolidation, uh, pretty early. In predictable environments, you want to let consolidation run for longer. And that’s of course, a very simplified explanation, which gets elaborated during the podcast. So they built a model to explore that hypothesis, and we discussed many topics around their model and related phenomenon.  </p>



<p>Paul    00:03:17    I link to the paper in the show notes at braininspired.co/podcast/120. Uh, there’s also a guest question from Mike, a Patreon supporter who actually pointed me to the work that we’re discussing. So thanks Mike for the question and pointing me to it and thank you to all my Patreon supporters, by the way. During my little, uh, post recording check-in with James, Andrew and Weinan, uh, asking if everything went okay. Uh, James mentioned that it felt a lot like a conversation they would have in their regular weekly lab meetings. So if you’re wondering what their lab meetings might be like, here you go. Andrew, James, Weinan, uh, thanks for being on the show here. So, um, what we’re going to do to start off is I’m going to ask you guys to each introduce yourselves. Andrew, let’s start with you because you were, uh, on the podcast before we were talking about deep learning theory and ever since you’re on, of course, uh, you’ve been emailing me. Can I please come back on? Can I please get back on and, and finally we have you back on here. So, uh, Andrew, who are you?  </p>



<p>Andrew    00:04:24    Yeah. Um, so I’m a joint group leader at, uh, the Gatsby unit and Sainsbury welcome center at UCL. And I interested in deep learning theory and ways that can inform theories of psychology and neuroscience.  </p>



<p>Paul    00:04:41    James, would you like to go next?  </p>



<p>James    00:04:43    Sure. Yeah. Yeah. My name’s James Fitzgerald. I’m a group leader at Janelia research campus, which is part of the Howard Hughes medical Institute. So I’m a theoretical neuroscientist. I’m actually very broadly interested in a lot of different things. So in addition to the learning and memory stuff, we’ll talk about today. I also work on like small animals, like zebra fish and food flies. Um, and I’m very collaborative. So I’d like to work with diverse people coming from all sorts of different perspectives. I think that’s one of the most fun parts of doing science.  </p>



<p>Paul    00:05:10    I’m going to ask about the collaboration in just a minute here. Wayne on hi,  </p>



<p>Weinan    00:05:15    Paul. It’s great to finally meet you. Yeah, you too. My name’s Weinan, I’m a currently a senior postdoc in Nelson Spruston’s lab at Janelia. So I joined, uh, now four years ago after, uh, doing ion channel biophysics and synaptic transmission studies, uh, for seven years. After joining Janelia, I just wanted to sort of step up and get a bigger picture kind of more a framework thinking on neuroscience. So I decided to, okay, let me do theory and experiments together. So it has been a pleasure to collaborate with engineering teams on this project.  </p>



<p>Paul    00:05:52    Yeah. So, so this project is, um, pretty much all theory, right? So, uh, the title of the paper that we’re going to talk about is Organizing Memories for Generalization in Complementary Learning Systems. Before we talk about the theory, um, I kind of want to ask, well, how did this collaboration come about? And, um, anyone can jump in and answer because, uh, and, and tie it into that is the idea of, of Janelia. And I, I’m curious, you know, it seems like Janelia specifically is highly collaborative. So, um, I don’t know if that’s a factor in this as well.  </p>



<p>James    00:06:29    Yeah, for sure. I mean, so I think, you know, as you just heard to weigh on is actually a Nelson spoons lab at Janelia. So none of us are actually in the same lab anyway. Um, so Janelia labs, the very small to kind of try to encourage collaboration. So the idea being that, you know, no one lab has enough people or all the expertise you would want to kind of achieve the project’s goals.  </p>



<p>Paul    00:06:51    Is that built in though as a, as a principle when forming  </p>



<p>James    00:06:55    It is. Yeah. So they, that’s why they’d kept the labs small is to kind of, you know, really encourage people to interact with each other and collaborate. So, so why not? And I kind of arrived at Janelia at almost the same time and, uh, you know, at the time my lab was completely empty. So we non was really kind of my first main post-doc collaborator at Janelia, even though he didn’t have any position in my lab. So I dunno know way. Do you want to kind of affect a little bit on the early days?  </p>



<p>Weinan    00:07:21    Oh yeah, that was interesting story. So in 2017, I joined and that’s executive one James just joined. I was just in a sort of experimental crisis mode. Like I’ve been doing so many experiments and, uh, and that I was trying to decide what to do here at engineer. I know I joined Nelson slab just to study some more single neuron computations, but that’s when DeepMind released their off ago. Yeah. And it was having a big splash in the world and, uh, I was a long time go player and that just really shocked me how human life the moves are. And it would just start to think, okay, so where can I get frameworks, uh, to inform like all the data I collected, I think I need to collaborate with theorists and I need to combine neuroscience AI. So, and then James and I started to talk and, uh, I was generally interested in complementary learning systems. So the seminal work by J by Cleveland, and then we started talking and then James was interested. Okay. So why don’t we just start with modeling, say one, because he, his recent findings show that say one spine is a very transient. So it turns over every two weeks  </p>



<p>Paul    00:08:40    And hippocampus I’ll just interject and hippocampus. Yeah.  </p>



<p>Weinan    00:08:44    Yeah. So that quick spine dynamics, James suggest that that might be a form of regularization, like weight decay and machine learning. So that’s why, what got us started self is than that. And I don’t want to get this too long, but, uh, but later on James, just, I remember that really vividly. He, he drew on the board that, okay, do you know what happens? You’ve have noise in the data. If you train, uh, on the training data and the generalization error, if that your performance on new data will start to decrease, and that will start to rise up. And I remember being an expert on this, I was shocked by that fact that I said, why is that okay? You need regularization in order for training to not over-fit.  </p>



<p>James    00:09:30    Oh yeah. And I think that’s just interject a little bit. I think that’s actually a great segue in also to how Andrew got involved in all this stuff, which is that Andrew and I go back a long, long way. So we were grad students together. We knew each other way back then. And then we were actually postdocs also together in the same program. And so we’ve known each other for a very long time, but we hadn’t been collaborating and a wall. Um, Andrew was in his postdoc, he was working with MedU, another author on this paper. Uh, very precisely analyzing the amount of learning in a batch system that, you know, would be optimal for generalization. And so it was very much in my mind, the importance of thinking about legalization in part by, you know, interactions with Andrew in a non-collaborative way, but then once went on and I started to think about the benefits, um, that, you know, this types of legalization might provide for learning systems.  </p>



<p>James    00:10:19    Uh, we decided to be really fun to see if Andrew would also be interested because we knew that he had overlapping interests too. And that’s when we kind of, uh, kind of bought Andrew and Debbie thing. And we had Andrew come to Janelia, we planned to visit our project. Uh, and then, you know, we voted up and we’ve been working together for the last couple of years on this. So yeah, in the early days, but we’re not. And I we’re modeling, you know, we were thinking about kind of the role of, uh, you know, transient memories in the hippocampus and what that might do to kind of aid system-wide function and a complimentary learning systems type way. But we didn’t actually have any kind of explicit, critical model in those early days. And it was only once Andrew started to get involved, that we kind of really started to build a integrated model of the whole system, uh, based on the insights that he’s had by kind of thinking about learning and deeply learning networks.  </p>



<p>Paul    00:11:06    Oh, Andrew you’re you’re, uh, not at Janelia. So, uh, I don’t know if you’re being pulled in a thousand different directions by lots of people that want to collaborate, uh, with you. So yeah. How do you, where’s your threshold? How’d you get sucked into collaboration with these guys?  </p>



<p>Andrew    00:11:25    Yeah. Well, I was just thinking, I’m glad that you to remember how this started as sort of ironic given that we’re studying episodic memory, but I really don’t remember quite how it all came together. Um, but, but yeah, I mean, I guess the, the memories that are recurring or yeah. Thinking about do 33, I have this structure that I’m hoping will be replicable across many experimental domains. And the idea is first come up with some basic insights into how deep networks work, just treat them on their own terms. At that point, it looks basically like what a computer scientist or physicist would be doing. And then if you can get some durable insights into those systems, hopefully that offers interesting hypothesis for search and experimental to means. And in this case, we initially did work on generalization error and then it seemed like this could potentially shed light on, um, mechanical consolidation. We just had a co-sign poster on this to just do a night that didn’t go nearly as far as this paper, but it was some early seeds of it. And then I think James and I probably just heartburn James Richard.  </p>



<p>James    00:12:34    Well, it couldn’t reconstruct actually how I even looked the old emails, how this got started. But I do remember, I do remember when you visited us at Janelia and you know, and at that point, presumably we already knew we were planning to collaborate or maybe we just had to come for a talk, actually, maybe, maybe it was just going to come and give a talk. But in any case, I remember us taking a walk along Seldon island. So Janelia is white on the Potomac and one of the odd, but wonderful things about us is we have a private island in the Potomac that does a footbridge and it’s just wild. This there’s nothing that are except for like a field. And, and so Andrew and I will walk in and, you know, weigh on and I had already been working on this stuff. And of course, Andrew had been doing his stuff with MedU and we were just discussing like, well, you know, everybody talks about complementary learning systems and it’s kind of implicit in a lot of what people say that they think that the point of having the cortex is for generalization, but do people actually realize the danger of overfitting in these systems?  </p>



<p>James    00:13:25    And we were kind of debating this back and forth a lot because on the one hand we were like, well, it seems like they kind of should, if they think about it from the viewpoint of machine learning, but at the same time, it didn’t actually seem like anybody had been thinking through the consequences of that about kind of, well, then you really need to regulate the amount of transfer. You can’t have it that you just kind of fully transfer memories from the hippocampus to the neocortex. And then I think that these conversations also in terms of why not, I can remember that same, visit him getting very excited, uh, by the generalization angle, you know, before that visit, we were thinking about many different things about the benefits of kind of transient HIPAA capital traces, also in terms of things like memory capacity in the hippocampus and stuff like that. But I think after that visit, we all kind of consolidated around this fundamental importance of, you know, if you build the system for generalization, there’s going to be some new requirements that, you know, people have not been thinking through, uh, from the perspective of Nolan.  </p>



<p>Andrew    00:14:19    And I think one of the most fulfilling aspects of this collaboration is that at this point, the idea is, are so jointly wealth and that it’s like, it really was one of those wonderful times where you’re just everyone’s riffing off of each other. And that somehow it comes out to be this thing that’s greater than that. Some of the parts,  </p>



<p>Paul    00:14:40    Well, that’s interesting. I didn’t know that the connection James between you and Andrew, because I didn’t do my homework about reading your CVS, I suppose, but I, but I believe it was Wayne on actually who first recommended, uh, Andrew to come on the podcast way back when, um, so I know that there’s a connection there as well. Um, It’s great to hear, well, let’s talk about, uh, the big idea in the paper and then, um, we can kind of unpack it from there because you’ve already hinted at some of the, um, some of what it’s about. So I don’t know who would like to start and give out like a really high level overview. Um, and then we can talk about, uh, complimentary learning systems and just go on down the list there.  </p>



<p>Andrew    00:15:26    Sure. Well, maybe I can. So, um, I guess the jumping off point is this long running debate about where memories are stored in the brain. And it’s, it’s a profound question is, you know, something that’s people have struggled with for many decades at this point. And the data from the neuropsychology is riveting: the patient H.M., for instance, lost his hippocampus and other MTL structures and just can’t form new memories, but even more striking, if you look into the memories before he had this, um, resection operation, uh, a lot of those memories are damaged as well. So the damage went back into the past basically. So that’s retrograde amnesia. And what that suggests is that there’s some process by which memories might initially be stored in the hippocampus, but ultimately transfer out or, um, reduplicate themselves in other parts of the brain. But this just raises all kinds of questions.  </p>



<p>Andrew    00:16:29    Why would you have a system set up like this? Why do you need multiple memory systems to start with, why couldn’t you have these memories stored in all places across the brain? Uh, and there’s a raging debate about this, this topic. And so when we were looking at this, we were trying to find, um, ideas that might’ve been overlooked and looking at machine learning, you can see that there’s, there’s this very interesting phenomenon that if you’re training from a fixed set of data, a fixed batch of data, and you’re going through it again and again, and the idiosyncrasies of that data can cause you to learn spurious relationships. And so too much learning from the same fixed batch of data can be counterproductive. And so we thought maybe this was relevant to a systems consolidation. If you think of that batch of data, as the experiences you started your hippocampus, what is saying is that there’s only so much replay you should do to sort of try to encode those memories into neocortex, because if you did too much, you would not be learning the general rule that’s in that data set. And so that we think can start to make sense of a lot of these critical puzzles that have been out there.  </p>



<p>James    00:17:45    Yeah. So maybe to kind of just elaborate on that a little bit. So like, you know, one of the big empirical puzzles is that, you know, because of patients like H.M., there’s been, what’s called the standard theory of systems consolidation, which says that, you know, in the beginning, everything is encoded in the hippocampus, but then over time, everything is consolidated into the cortex and it becomes hippocampus independent. Um, and there’s a lot of data to support that from both humans and from animals. But over time, there’s also been a growing body of literature that conflicts with that and suggest that in both humans and in animals, certain types of memories do permanently require the hippocampus. And so there’s been kind of a conceptual shift in the field where people start to think about consolidation, not just as something that happens over time, but that has something to do with the content of memory. And, um, you know, there’s all sorts of, uh, conceptual ideas about what that content might be and why it is that certain things require the hippocampus permanently. But again, from this perspective of generalization and neural networks, we thought we might be able to make this very concrete about kind of when you can and when you can’t, uh, take something that was initially encoded in the hippocampus and gradually make it hippocampus independent.  </p>



<p>Paul    00:18:55    So you you’ve taken complementary learning systems, which we’ve already talked about, um, a bit. And, uh, essentially the theory, uh, is generalization optimized, complementary learning systems is the name of the, I guess, is it the theory or the model setup, or is that interchangeable?  </p>



<p>Andrew    00:19:14    It’s sort of actually two levels that you can view. This is a work on the first is a formalism that could let you model many other kinds of consolidation theories. So we have this particular mathematical framework. You can instantiate the standard theory. You can instantiate generalization, optimized, complementary learning systems. There may be others. And so at that level, it sort of lets you understand the consequences of different choices of how these memory systems interact. But then the one that looks good to us, just looking at the data, yes, is generalization optimized, complementary learning systems.  </p>



<p>Paul    00:19:51    So what is the take home of what’s new about a generalization optimized CLS versus an a, you may be repeating yourself. So I apologize.  </p>



<p>James    00:20:01    So maybe the way I’d characterize the original CLS idea is that there are benefits from a rapid learning system and a slow learning system. And a lot of those benefits that we’ll highlight it in the context of the original complementary learning systems idea was that having a fast learning system that can record and memorize examples could allow the slow learning system to interleave those examples during learning and prevent what they called catastrophic interference. And so you’d be able to use the fast system to record the memory as it comes, but then slowly train up a slow learning system based on those experiences and idea would then be that in this slow learning system, you get some sort of representation that is kind of generalized over the various training examples that you’ve seen. So in some sense, I think generalization has always been an important part of thinking about the complementary learning systems framework.  </p>



<p>James    00:21:05    Um, but what is new and how we set things up is that we have an explicit generative model for the environment that allows us to consider the possibility that there are unpredictable elements in that environment or noise if you set it up in kind of an abstract way and because of this, but we show that had not been kind of considered an earlier complimentary learning systems models, is that in the presence of noise, it’s not always ideal actually for the slow learning system to learn forever. At some point you actually have to stop learning to avoid overfitting to this noise, which again, from the viewpoint of machine learning makes a lot of sense, but in the setting of conventional complimentary learning systems problems, you’re learning from these data that don’t actually have any noise. It’s just kind of these very reproducible, very reliable cognitive relationships.  </p>



<p>James    00:21:53    And as a consequence, there’s no tension between, you know, what we’d call a generalization of what we call is memorization, that the ability of the system to recall those examples versus kind of deal with us, you know, cognitively as semantically, similar examples going forward. But once you add noise, you break that and it starts to become actually then you have to make a choice that, you know, well, what is it that you want this slow learning system to do? Do you want it to be able to faithfully reproduce the past, which is what we would call the standard model of systems consolidation, or do you want it to actually do as well as it possibly could and anticipating new experiences from the environment that could incur in the future. And, and that’s what we mean by generalization.  </p>



<p>Paul    00:22:33    So I had, uh, already Hassan on a awhile back and he, he, uh, had written this paper, uh, I believe called direct fit to nature. But the idea was that, uh, our cortex essentially, uh, have so many neurons and synapses, um, AKA, so many parameters that it’s constantly trying to, uh, overfit trying to, basically you can’t overfit, um, it’s so big that it can memorize everything. And of course this has been shown in deep learning networks as well, is the right way to think about the cortex then. So, so James, what you were just describing was sort of a larger picture, um, what a normative framework for what you would want as a generalization, um, organism didn’t realization, uh, geared organism. Um, and one of the things I thought in reading the paper, um, was, well, is it right to think of cortex then as trying it’s damnedest to fit everything perfectly. And, uh, there’s, there are regulatory systems that are preventing it from this direct fit that, uh, Ori talks about. And so at the organism or brain level, I suppose we should think about that system as separate from, well as a kind of a control mechanism for this otherwise, uh, running rampantly, memorizing, uh, things cortex, sorry, that was a mouthful.  </p>



<p>Andrew    00:24:00    Yeah, no, I think that, I mean, you can do better by for instance, stopping training early, even in a large deep network. And so if this is something that, um, the brand takes advantage of and it would be generalizing better. So there are circumstances like already saying where if you have a giant network you’re not really going to over-fit dramatically. So it’s not maybe a huge benefit if you stop early, but it’s still there. And in some regions that effective, early stopping is incredibly important. So if, if the number, if the amount of experience is roughly matched to the number of degrees of freedom in your model, then that’s the point where, uh, you could get a lot of benefit from replaying that data, if it’s noise free, because you could just perfectly determined what the whole mapping should be. But if it’s noisy, it’s also the point where you can do as badly as possible.  </p>



<p>Andrew    00:24:54    And so regularization is very important. And maybe just to highlight with an example, so because these things can sound abstract, the patient H.M., for instance, um, uh, Suzanne Corkin, the MIT professor who did a lot of work with him, was asking him, um, if he had any memories of his mother and specific memories of his former mother and his response was well, she’s just my mother. And, you know, I, I think the point there is that this is someone who had their entire cortex intact, right? And they, he could not come up with a specific memory of even his parents. Right. It’s true of his father as well. Like you do all kinds of more reliable facts. He knew that his father for instance, was, um, from the south and things like this. And so there’s this interesting, um, tension here where the, the, the quality of the memory, the type of memory that you can put in neocortex seems to be very different.  </p>



<p>Andrew    00:26:01    And we think this theory explains some of that because there’s certain components of a memory or a certain scenario that you can transfer. Like the fact that mom is mom is always true, very reliable, but then there’s other features of memory, which can be very idiosyncratic, like what you did one specific Christmas. So he knew that Christmas trees were things that happened at Christmas time, right. But he didn’t have a specific memory of one specific Christmas and what they did. And that that’s the sort of, that’s what we’re proposing is explaining the character of this transformation as, um, uh, being aimed at generalization and, um, flowing from these properties didn’t numbers.  </p>



<p>Paul    00:26:48    Maybe we should get into the model, the models, the three models used, and just the whole setup. I was going to say, experimental setup, the whole modeling setup. So, uh, on, do you want to describe how, like, the different kinds of models used that are supposedly to represent different brain areas, although you use a different vernacular in the paper, because, uh, you talk about how these could map onto other brain areas or it’s amenable to other brain areas because, well, so you use student teacher, uh, and notebook in the paper, but, uh, do you want to talk about how the, what the models are and how they map on?  </p>



<p>Weinan    00:27:25    Oh yeah. So we thought about how to formalize this learning problem of systems consolidation. So Timberlake think about a brain that can learn things from environment and what is the environment. It’s just a sort of view be viewed as a data generator and you produce some kind of input, output mappings, maybe very complex functions. And we want to replicate that by very simple, generous model. In this case, it’s a shallow linear neural network. So it just transforms an input vector into a single skater in most of our simulations. And this funds formation generally produced data pairs like X and Y input output pairs to feed that to another similar architectured student network. So it’s another shallow, uh, linear network that can between all the 20 data generated by the teacher and learn to represent the mapping of the teacher. And the student learning is 80 through a memory module, similar to like the current AIS external memory idea is modeling the hippocampus.  </p>



<p>Weinan    00:28:36    So it’s a healthy network. That’s, bi-directionally conducted all to all to the student. So the job is really capture the ongoing, uh, experiences by one shot in coding, through hiding learning. So what the heck campus has to be in suppose to be learning really fast. And then after capturing that it has the ability to undergo patent completion offline. So it can just randomly search for a previous memory and reactivate the student through the back, uh, the feedback weights. So this essentially is modeling episodic recall. So you could offline re play what the students was seeing when the teacher give the student the example. So as he, if you have the teacher, like it’s a notebook, so you just reviewing what the teacher said, essentially. So through doing this offline activation, the student can learn much more efficiently as we later show in the paper. So that’s roughly the three, uh,  </p>



<p>Paul    00:29:34    And your networks. Yeah. So you have the environment, which is the teacher. You have the, uh, cortex, which is the student, and you have the hippocampus, which is  </p>



<p>James    00:29:46    Just to kind of emphasize. One of the thing is that, you know, one important difference between, you know, the teacher and the student is that the teacher has noise. And because the teacher has noise, where that means is that the mapping provided by the teacher may or may not actually be fully learnable by the student and controlling the magnitude of that noise then is a critical parameter that determines the optimal amount of consolidation in this framework,  </p>



<p>Weinan    00:30:14    Just to highlight one more thing that you asked, what’s new compared to the original CLS framework. So we have an explicit notebook in this, uh, in this model that’s directly connected to the student. I think, uh, some of the early CLS works just kind of replay, um, training examples, not by storing them in a neural network, but just replaying the representations. And this has generated some really interesting insights that we can talk about later, like having a distributed binary Hopfield network reactivating the student could have some very interesting, uh, interference-robust properties to train the student.  </p>



<p>Paul    00:30:51    Great. Andrew, I was going to ask, uh, so you guys are using linear, although these are shallow linear networks, and we talked all about your deep linear networks last time you were on, why, why the linear networks in this case, is it just to have principled theoretical tractability?  </p>



<p>Andrew    00:31:09    Yeah, I mean, I, I hope one day we’ll have nonlinear ones, but, um, all of the qualitative features that we wanted to demonstrate came out with shallow linear networks. So it’s just learning linear regression. Right. And so my impulse, and I think it’s shared by James and Weinan to some extent, at least, is to go as simple as you possibly can and still, um, get at the essential, and what you get for that in return is greater tractability. So another feature of this framework is that most of our results are sort of mathematical demonstrations. And so you feel like you can really, at least I feel it’s easier to get one’s head around it. And, uh, another thing that this very simple setting enabled is we can make clearer normative claims so we can optimize everything about these settings. How well could you possibly do if you just had a notebook or if you just had the student and then we can show that yes, indeed. You really do do better when you have both interacting.  </p>



<p>James    00:32:11    No, I was just going to say, and just to add to that, I think another thing that’s really powerful about setting it up in this very simple way and being able to analyze it so comprehensively is that, you know, as we kind of alluded to earlier, I think one of the big challenges in memory research is to figure out, well, what is the key quantity that determines whether it’s going to be hippocampus dependent or not. And within this kind of modeling architecture, we can really solve that problem from the viewpoint of what would optimize generalization and then, you know, going forward, you know, Weinan’s an experimenter. So we can actually design experiments very much around directly that parameter and just test the theory very rigorously about whether or not this actually does provide empirically meaningful predictions, more than just the theoretical insights. And I think that that gets harder and harder. The more complicated the model becomes to really kind of boil down what is the critical parameter and to design an experiment that embodies that critical parameter.  </p>



<p>Paul    00:33:06    Oh, no. Weinan, you’re going to be stuck in experimental crisis still. You’re trying to get out of that.  </p>



<p>Weinan    00:33:12    No, I think that it’s perfect. Combination of theory and then do the experiment.  </p>



<p>Paul    00:33:17    Okay. Alright. So who wants to talk about how, how the model works to, to generalize the, the right amount of generalization? Yeah.  </p>



<p>Andrew    00:33:26    So the setting that we’re looking at is sort of like, imagine you’re doing a psychology experiment for an hour and you see a bunch of, uh, experiences over that course of that hour. And then you go home and over maybe many days, um, you have whatever you store and during that hour, and you can, uh, perhaps, you know, different, the notebook could replay that information out into students to learn from it. And then after some period of time would bring you back into the lab and we test your memory. So it’s this sort of upfront get a batch of data. How do you make the best use of that scenario over analyzing? And so generalization for us just means when you come back to the lab, how well will you do on new problems? Instances drawn from the distribution of problem instances that you’re seeing on that first case first time.  </p>



<p>Andrew    00:34:23    So it could be, you’re learning to distinguish dogs and cats or something like this. And then we show you new images of dogs and cats. How well do you do on that? And, um, the key feature of the framework is that, um, just as in deep learning theory — I mean, this is building directly off of deep learning theory and the double descent phenomenon — um, there’s an optimal amount of training time that you can train from a fixed batch of data because otherwise you start picking up on these aspects that are just noise. And so, um, as the predictability of the rule that you’re trying to learn increases, you can train for longer and longer and you can characterize sort of exactly how long, um, but that, that’s the basic idea. As you get more predictable, you can train for longer. If you can train for longer, you can also memorize the exact examples.  </p>



<p>Andrew    00:35:17    You’ve seen more. And so your, your memory error is decreasing. And that means that more of the memory, the specific memory, would transfer into your cortex and not just be in a notebook. Maybe one other thing to throw in here before I let someone else jump in: you can compare this. So there’s different ways you could generalize. You can try to use the student network, but you could also try to use the notebook. You can just say, let this Hopfield network complete the pattern, whatever pattern you give it, and make its prediction. And, uh, one important result here is that in high dimensions, that strategy fails completely actually. So basically if you think of high dimensional vectors, the geometry is very different. Any new input is almost surely orthogonal to all of the inputs in your, all of the experience that you’ve had previously. And because of that, it doesn’t let you generalize. So it’s interesting, you need this notebook to store these examples so that you can replay them to the student, but ultimately it’s the student that’s going to be able to generalize well and not the notebook.  </p>



<p>Paul    00:36:29    Maybe this is a good time. So I have a listener question. So predictability is a key aspect of, uh, of the performance of the generalization performance. So, um, with different levels of predictability, the S the generalization needs to cut off right at certain different points. Well, you know what, I’ll just play the question. So this is from Michael till doll, and then we can, uh, back up and talk, talk about the bigger issue after the question, if needed,  </p>



<p>Speaker 5    00:36:57    In the discussion section, it’s suggested that replay could be the mechanism that regulates generalization to the neocortex, which seems very probable, but the thing I’m still missing is, do you have any ideas around how predictability of an experience is determined, as that seems to be a key parameter in the theory?  </p>



<p>Paul    00:37:16    Okay. So I know that’s a little ahead of the game here, but I thought I don’t want to, I didn’t want to miss the opportunity, uh, to play the question before one of the, you started, started answering the question on your own.  </p>



<p>Andrew    00:37:27    Yeah, no, that really is such a good question. And that, you know, we don’t, we don’t address that in this paper. What we say is, imagine you had an Oracle, which could tell you exactly how predictable this experience was, what, what should you do to be optimal? But we don’t explain how you could estimate that it’s not, we do think there are ways you could potentially estimate it, but, um, it’s not part of this theory at present. We just are saying, suppose you were able to understand the predictability. What would that then mean for systems consolidation?  </p>



<p>Paul    00:38:05    I was going to reiterate the problem, which is that predictability needs to be estimated by some system to regulate the generalization process.  </p>



<p>Weinan    00:38:14    Yeah. Just to give a journal up yesterday. And this question is such an important one. Yeah. People always ask. Okay. So that’s great. But wait, how do you actually estimate the SNR in the experience? So op Harare, if you get a new batch of data for the first time and you, if you are learning from no previous knowledge, there’s no way for, to know whether this batch of data is predictable or not. So you kind of have to learn that through trial and error, but a, the trial and error can be divided into like a longterm evolutionary scale or like, uh, within a lifetime. So maybe some animals already has built-in predictability estimators, um, from birth, maybe there’s something like humans, like a certain facial features or certain animals that if you see that it just, the brain will treat it as a high, as an Arvada, no matter what, or during our lifetime, I think the property like that, the main way we learn how to estimate the predictability through lifelong meta-learning.  </p>



<p>Weinan    00:39:15    So when you are a child, you experience this a lot, experience a lot of things, and you’ll make predictions all the time. And then gradually you learn what, what source of information is good to consolidate. I’ll give the example that, okay, so people typically know the reliability of the source of information. So for example, I gave you an article from the New York Times, and then I gave you an article from The Onion, and you get a, like a really visceral feeling like which one to trust more and to understand more. Another example, it’s like, uh, my daughter. I, I see, like, she has almost no idea what’s predictable, what’s not. And just one time I was holding her and I was cooking and she just wanted to touch the hot stove. I said, okay, uh, that’s not a good idea to do that.  </p>



<p>Weinan    00:40:05    Uh, you’re going to get burned, and she got to touch the edge that’s a little bit hot. And, uh, I, I just said, okay, if you don’t listen to me, you can go ahead and try it. And then she touched it, ouch, and then she turned her head back to me, and I think the look in her eyes is that, okay, I really need to listen to this dude in the future. I think that’s when the meta-learning is occurring in her brain, that is assigning different sources, uh, like different predictability. Like we all trust, like, authorities, like teachers in our lives, parents and the friends we trust. Like, even like another key aspect is that a lot of people think more frequent things should be consolidated because it’s more reliable, but our theory is really decoupling predictability from frequency. So, like nowadays, you know, there is frequent misinformation online, and it’s not the quantity that can overwhelm your brain and determine what gets transferred. It’s really like, like, uh, for example, uh, like something someone you trust told you, like even just once, can, can really make a long lasting impact, but some news outlets give you the same story again and again, and you will not transfer it. So I think that’s a key thing. Like we learn this predictability through experiences through meta-learning,  </p>



<p>Paul    00:41:31    But those experiences need to be essentially stored, right, in some system to be able to be used again. Um, and so is that, is that a, just a different kind of memory? Is that more of an implicit procedural memory or, you know, outside of the hippocampal–neocortical complementary learning systems framework, or, or do we know, or does it matter?  </p>



<p>James    00:41:52    Yeah, that’s a, that’s a good question. I mean, so in our model, the notebook does kind of create a record of the whole memory. And so using the hippocampus or the notebook in the mathematical framework, you can reactivate those cortical patterns corresponding to that full memory. But that isn’t, I think, part of what we mean by complementary learning systems: you already had the ability to do that in the notebook. So then maybe you don’t need to kind of create a new cortical system to do the same thing, and the idea would be, well, what can’t the notebook do? And what the notebook can’t do is to generalize well to new examples. And to kind of go back to one of Weinan’s points earlier, I don’t mean to say that, you know, the, you know, the neocortex can’t actually aid in memory itself. And in fact, there are some examples within our framework where the, you know, the cortical module is actually able to even memorize better than, uh, the notebook.  </p>



<p>James    00:42:47    But we think that the really fundamental thing that’s missing from that just sort of faithful reproduction of the past, which the notebook can do, is the ability to generalize well. But then the amount of consolidation that would optimize for that generalization, that depends on the predictability. And as we’ve been describing, you know, it is a very important unknown within our modeling framework, how precisely this gets done. Um, but we think that you first have to recognize that it needs to be done before you have to think about how it is done. And so the earliest experiments, I think, that we could design and test, you know, we can just configure them so that the predictability is set according to us, and then see kind of to what extent the brain in fact does regulate the consolidation process based on that predictability, and then get more into the, uh, both algorithmic and mechanistic details of how that degree of predictability is determined. And then once determined, how that leads to regulation of the consolidation process itself.  </p>



<p>Andrew    00:43:46    There are some quite compelling experiments that show that it could be that individuals do misestimate the predictability sometimes, or maybe it’s not even fair to say misestimate, but they just estimate it differently. So there’s individual differences there. Maybe, Weinan, do you want to explain the generalizer versus discriminator experiment?  </p>



<p>Weinan    00:44:08    So I think a key thing about predictability is that it’s, uh, in a way it’s, it’s not the universal, objective predictability, it’s really like the inferred predictability. Uh, I’m not sure if you guys agree, but the thought on this is it’s really like how the agent or how the animal thinks what the predictability is. And that depends on a lot of things. So there’s an interesting set of rodent experiments in fear conditioning. There’s like really like individual differences on like the policy of animals doing the same task, whether they generalize or not. So like, there are certain, certain animals, if you shock them in a cage, for example, and two weeks later, you bring them back and they will show high freezing. So there’s a fear memory, but then if you take the same set of animals into a different, but similar cage to test their fear generalization, only like around half of those animals will freeze.  </p>



<p>Weinan    00:45:10    They’ll start to generalize to the different cages, but the other half will just maintain their discrimination in these two cages and not freeze. And they know this is a different cage. Surprisingly, for the generalizers who froze in both cages, the memory is not dependent on the hippocampus. So there’s this evidence that, you know, the generalizers do treat the original context as the high SNR context and, uh, that promoted generalization and systems consolidation so that the memory actually is becoming, you know, hippocampus-independent. But for the other group that is still maintaining the discrimination, lesioning the hippocampus, uh, actually impaired the original memory. So I think that means that those animals are treating the environment as a, probably like a low SNR task and that will not transfer. So it still maintains the hippocampal dependence. And, uh, we have different, like in figure five of our paper, like we have a diagram showing you that, okay.  </p>



<p>Weinan    00:46:11    So even within a single experience, in single animals, maybe our different cognitive processes can change the SNR of the data. So for example, you, you have a whole scene, the animals can actually use their covert or overt attention to only focus on part of the scene that might have different predictive value to the outcome. So maybe the animal can just pay attention to the general features in the experimental room. Like the smell may be similar and the experimenter may be wearing the same lab coat, and that’s highly predictable. The other one, like if you just focus on the, like, different patterns on the wall, that’s highly idiosyncratic, so that will be low SNR. So I think attention is a very key thing, both in determining the signal to noise ratio and also for regulation in consolidation. I just want to add one last thing about the implementation of this regulation that we said in the discussion, that replay might be a natural way to do this.  </p>



<p>Weinan    00:47:12    Just regulate the amount of replay to modulate consolidation, but it has been shown that replay actually, uh, functions in like a variety of, uh, different ways, uh, to promote, for example, maintaining attractors in the recurrent network, for example, or keeping the hippocampus in register with the neocortex. And so with replay, it could be not beneficial to stop replay altogether just to prevent over-fitting. It might be the brain might be using replay to still replay all the memories. And then you have a predictability module. For example, like the PFC can control offline what, which part of the cortex gets activated and enable learning in an offline attention manner. Like we have the amazing ability, uh, like for example, you close your eyes, you can navigate within your memory, like focus on certain aspects of things. And maybe the brain could tap into that mechanism to mask certain memory components, uh, during offline replay for consolidation.  </p>



<p>Paul    00:48:18    One of the things that you said. So I kind of have like two questions in one here. Um, one is, you know, there are certain situations where I don’t care about predictability because I have to, uh, climb up the mountain before I fall or whatever, you know, what’s the classic, escape the lion or something like that. Right. Um, and in that case, I guess you would predict that predictability, your predictability regulatory system, uh, just gets overridden, uh, perhaps because you, you’re not needing to, uh, really consider how predictable the data is, or maybe it just automatically happens. And, uh, I mean, cause you’re going to remember that event probably unless you’re, unless you die.  </p>



<p>Weinan    00:49:00    That’s another really, really good question. So I think through our framework, and many people ask us, what about emotionally salient memories? That’s really surprising and really novel. How is that related to the idea of predictability? I think it’s important to keep these two concepts orthogonal to each other. So for example, emotional memories could either be highly idiosyncratic or it could be predictable. I think what, what the emotional salience is doing is maybe, I, I’m not sure how much data support there is, it could bias the memory retention, uh, of certain memories. So for example, you’ve climbed the mountain and you made a mistake and that was really dangerous or something like surprising happened. That’s pretty random, that surprising factor, maybe enhancing the memory retention in your hippocampus, and then you can actually remember that memory for a long time. You can tell the story maybe 20 years later, okay.  </p>



<p>Weinan    00:49:58    Back 20 years ago, I had this terrible event, but then what, which components gets transferred to the neocortex is determined by the predictability of the different memory components on top of that. So I think that’s a, almost a first future process of which memories, like we forget, like almost all of our episodic memories in a few days and only a few gets encoded. Uh, maybe that’s more like Hayden ER, hippocampus, and to be weakened by the who knows, but it just seems that way forget most of the things and the certain things that we remember if we do remember is modulated by some kind of other neuromodulatory process. And our theory builds on top of that is the memories that gets retained for long-term storage, which components actually routes to the neocortex and which components should stay in the hippocampus. That’s kind of determined by the secondary factor is the predictability.  </p>



<p>James    00:50:59    I think your question also brings up another interesting and subtle point about predictability and, um, you know, we introduced the teacher as an environment, right? In terms of this is some sort of generator of experiences, but that’s actually pretty abstract because you know what, the brain only knows is the brain’s activity. And so really what the teacher is, is a generator of brain activity based upon for example, sensory and motor experiences in the world. So if you kind of think about the teacher, not exclusively as an environment, but just as a generator of neural activity, then of course your cognition itself can also contribute to part of what the teacher is. Cause the teacher is just whatever it is that leads to patterns of activity that the student is trying to learn to produce without, for example, the involvement of, you know, the hippocampus or other modules.  </p>



<p>James    00:51:53    And so if you think about the teacher in this way, it could be that for example, some of these highly emotionally salient or very memorable events, they in some sense are very predictable just because you think about them a lot. And because you think about them a lot, you actually do recurrently get these patterns of neural activity that you may actually want the student, the neocortex, to start to be able to produce on its own. And we think that this may have to do with why, you know, in human patients, for example, they are sometimes able to reproduce highly detailed, uh, aspects of their past life. That seemed to be highly unpredictable. But we think that the reason for this is because basically they were so reproducible based on the experiences of that person or based for example, on the thinking patterns of that person, that they start to be able to be consolidated because they start to be predictable in this more general sense of not just what happens in the environment, but what happens in your mind.  </p>



<p>Paul    00:52:49    So is it too simple to map on the teacher, to the perceptual, uh, systems, perceptual cortex, for example?  </p>



<p>James    00:52:58    Yeah, I think it could be. I mean, I definitely do agree that, you know, we think about the teacher in the most simple setting as just the perceptual systems. And that is kind of the examples that we provide in the current paper. And that is the setting that is guiding our kind of initial round of, uh, experimental design. But I do think that when it comes time to really understanding how these abstract neural network models will get mapped onto real human cognition and neuroscience, that that is too simple. I believe that you do need to consider, uh, more broadly what it is that’s leading to patterns of neural activity across the entire cortex.  </p>



<p>Paul    00:53:35    Yeah. But then you would have the problem that, uh, th the sensory cortex should be getting trained also as a generalization optimizer.  </p>



<p>James    00:53:45    Absolutely. And in fact, that’s a very important part of how we think about it is that, you know, um, you know, from the viewpoint of the abstract neural networks, we have very well-defined inputs and outputs. But when we actually think about what that means in terms of the brain, we’re just thinking about the neocortex as if it’s some kind of an autoencoder where, you know, activity is generated by your sensory motor experiences in the world, your cognitive processes. And what you’re trying to do is build a cortical system that is able to reliably reproduce those patterns going forward into the future without needing sensory inputs, without needing involvement of other parts of the brain. And in this point of view, then, you know, it’s not as though just that, you know, as you said, like some low level sensory area is not only the input of this framework, it’s also the output. And many of these relationships have to get learned simultaneously. And for each one of these relationships, there could be a highly different degree of predictability. And as we emphasize in the paper, based upon that high variability in the degree of predictability, there should also therefore be a high variability in terms of the amount of consolidation for different types of synapses within the cortical network.  </p>



<p>Paul    00:54:57    So, so Weinan, I mean, you, you, you posited, uh, evolutionary architectural constraints versus meta-learning earlier, uh, when talking about how to regulate the system. Um, so you think that there’s room for, uh, both, I suppose. Yes. I think I, I don’t want to get into yeah, no, that’s okay. I, I just, I just wanted to make it clear. So, so wait, so nature and nurture are factors.  </p>



<p>Paul    00:55:30    So one of the things that’s, uh, know fairly attractive about complementary learning systems is, is the idea of, you know, when you have complementary systems that the whole is greater than the sum of its parts. Uh, and actually Steve Grossberg, um, calls this a complementary computing paradigm. Um, and, you know, he thinks of multiple processes in the brain acting like this and that when you have these two things working in parallel, neither of which can do well on its own, but when they are paired together, actually, um, give rise to what he calls a new degree of freedom, an extra degree of freedom. How would you describe that in terms of the whole being greater than the sum of its parts, uh, with this Go-CLS architecture?  </p>



<p>James    00:56:19    Yeah. So I think the sum is greater than its parts in at least several interesting ways within our current Go-CLS framework. So one, as Andrew mentioned earlier, we’re able to determine, well, what would be the optimal learning rate you could have for generalization, if all you had was the student, so you just don’t have any, uh, hippocampus or notebook you can use to recall the past. And because we could kind of treat that problem optimally, we could show very rigorously in fact, that when you put the two systems together, that in fact you do get a cortical network that generalizes more efficiently from the data than you could from online learning. And so there’s a really fundamental advantage where if you’re going to have some finite amount of data, you can just make better use of it. Period. If you have the ability to record it somewhere and recall it subsequently to guide learning.  </p>



<p>James    00:57:08    So that’s one way, another way that kind of also came up a little bit earlier, is that actually even when it comes to memory, there is a benefit. At least if you have a small notebook or a small hippocampus system, because what we were able to show there is that in that setting, you actually get some errors when the hippocampus or the notebook is trying to recall those memories. But what’s really amazing is that the nature of those errors is that it’s interference with other memories. And so if what you’re actually trying to do is train the student to memorize that. In fact, you can actually do better training from those noisy reactivations than actually those noisy reactivations themselves. And so what ends up happening is that quite counterintuitively, the training error or the memory performance of the student can actually outperform what you could get in a notebook alone. So yet again, for both of these cognitive functions for both the memory and the generalization, uh, the system works better when you put the two parts together.  </p>



<p>Andrew    00:58:09    And one, just to elaborate one small piece of that is it also clarifies the regime where you get the benefit. So there are regimes where online and sort of just having the students and, um, the replay strategy will look very similar and that’s, if you have tons of data, you have tons of data. It doesn’t matter get to the same point, but both of them, um, also if your data are very noisy, then, um, in the limited data regime, it can still, the gap can be fairly small. So I think it delineates the regime where this dual system memory, that sorts of worlds basically, where it’s the most useful. And it happens to be when you have sort of a fairly moderate amount of data and that data is quite reliable. Then you see a big advantage from replaying a lot. And arguably that’s a setting, a lot of real-world experience falls into.  </p>



<p>James    00:59:06    Yeah. And just to follow up on that, you know, what’s incredible about that as well is that that same regime is where the risk of overfitting is highest in the model. And so what’s really interesting if you just kind of step back from the details of the model and think about what it might mean. We basically say that, well, if you’re in a regime, we’re actually having these two learning systems is complimentary. You’re also in the same regime where if you don’t regulate, you’re going to overfit. And we think that this is a really important conceptual point because then, you know, you know, we do know that, you know, the biology has built multiple memory systems in the brain. And one of the lessons we can walk away with our artificial neural network is that perhaps the fact that it’s built, it suggests that it’s good for something. And at least within our framework, it’s very rigorously true that when it’s good for something you absolutely need to regulate you can’t just kind of transfer everything to the cortex. Hmm.  </p>



<p>Weinan    00:59:53    Yeah. So this is going back to James’ point on like, we have a notebook replaying samples, and because a notebook has limited capacity, uh, it shows some interference. So the replayed examples are not exactly the training examples. There, there’s some error, and this error just surprisingly, as James mentioned, does not hurt, uh, training the student. In fact, so I think this is a great story that is just so counterintuitive, that I was running the simulations and, uh, James and Andrew were here at Janelia. And, uh, I showed them the curves for the first time. So the notebook reactivation error is like, there’s a little bit of error due to interference, and we use this type of reactivated data to train the student. And it turns out the student training error can drop below that reactivated error. And that’s really weird: like, you are using the notebook-reactivated examples as labels, and you can’t possibly do better than that reactivation.  </p>



<p>Weinan    01:00:53    And later on, that led to a lot of mathematical analysis. And it turns out that, I think to me, this is so powerful. And they questioned me: okay, wait, that’s not right, uh, you should check your code if there are any bugs. And then I did something crazy. So I said, okay, no matter what I do, uh, I’ll get the lower error from the student. So what if I just generate random activities in the notebook? And to my surprise, that still trains the student perfectly, with some change in the learning rate. So I think this is something deep, that maybe, if listeners, uh, are building future generative models of memories, I think the reactivation has a certain property that can enhance generalization. So let me give you an example. So in machine learning, after all, I discovered a connection: there is a certain data augmentation method called mixup, where, for example, you’re training on ImageNet or MNIST, and you would just linearly combine different training classes together and also combine the output probabilities together, like adding them together. And they only use the mixed examples to train the network.  </p>



<p>Paul    01:02:05    The images themselves are mixed. Like one image would be a mixture of two separate images.  </p>



<p>Weinan    01:02:11    Yes. So you just randomly sample, like, two to four images and stack one and two together, and then the output probability will be, like, you have 10 outputs and it would be 0.5, 0.5. Yeah. And you only use this — you just get rid of the original pictures. You only use the composite images to train the network, and it trains equally well, and sometimes it’s even better. And this has become a very powerful, uh, like, data augmentation in even modern transformers, uh, so that the performance is much better. And it was also, like, really interestingly, being used in data encryption. Like, there is the Princeton study showing that, you know, the hospitals have records to train some kind of AI model for prediction. And because of the data privacy issues, they just randomly merge the patients’ data together and merge the outputs together. And it still trains the model equally well, while masking the original data.  </p>



<p>Weinan    01:03:10    So I think this is an interesting connection. I mean, the, uh, the brain is capacity limited. And if you want to store some previous experiences to train up your neocortex, you have such a flexible ability to change your input data and still train the neocortex well. I think the brain might be tapping into this mechanism and have sort of weird ways of generating examples instead of just replaying training examples one by one exactly. There might be random mergers, uh, of memories, and that has been supported by experimental data in the hippocampal field that certain memories are just prone to errors. So for example, you imagine a house — like, you left your house yesterday — and there could be related things, uh, being wrongly put in the scene. Like, you will remember, okay, there’s a car parked right in front of me, but, but it was not. So this, uh, leaking interference between memories could also be helpful in training the neocortex, because, you know, a car is a car sort of independent of which scene it’s in, so this kind of interference might not be as bad for training the neocortex. And that probably reflects a certain compositional nature of the world — like, you can have different things merged together and still give good training. Uh,  </p>



<p>Paul    01:04:32    This is a trivial question, or maybe this is a trivial question, but how do humans perform on the mixed image net dataset? I think  </p>



<p>Weinan    01:04:40    I can’t tell them apart.  </p>



<p>Paul    01:04:43    Will sell. So then does that run counter to the story you just told then? Because presumably unless, um, unless that’s just an inherent difference with, you know, deep learning networks, which we’re going to talk about here in a second anyway.  </p>



<p>Weinan    01:04:57    Yeah. So that perceptual example might be different, but you know, if, if you really put different objects together and humans have the amazing ability, like if I put like a cup of apple, a weird, like a car in the same scene versus seeing them apart, I think humans will have an easy time to pick up, okay, how many objects are even the scene and to give each labels?  </p>



<p>Paul    01:05:18    Oh, I thought that the pictures were blended, like where you take the RGB values and oh, they are so-so, they’re not compositional pictures. They are, they’re literally blended where we wouldn’t really be able to perform well. Is that true?  </p>



<p>Weinan    01:05:32    Yeah, I, I look at, so yeah, this, I need to think about this more, but maybe there’s something there.  </p>



<p>Paul    01:05:37    So predictability is, uh, a key or the key, is the world a really predictable or is it super noisy? Are we ever in a, I’m trying to think if I’ve ever been in a situation where it was completely unpredictable, maybe early on when I was dating, but, so how does predict, uh, are there situations where something is just truly unpredictable and if so, how does the network handle that?  </p>



<p>Andrew    01:06:04    Yeah, it’s a great question. Because so far we’ve basically been talking about unpredictability as noise. So, you know, where there’s real randomness — coin flips. But in fact, all the same phenomena occur in completely deterministic settings. It’s just that the equivalent is that the teacher is more complex than the student. So if there’s something that the student can’t possibly represent about the teacher — and I think that is definitely a reasonable assumption about the world, right? It’s very, very hard to imagine that we would be able to predict everything about a physical situation. And so essentially that unmodelable component — which, um, you know, in learning theory we call the approximation error — it looks like noise from the perspective of the student. And that judgment is, is completely relative to the student: what can the student actually do? And then if the teacher’s more complex than that, it will have the exact same effects. You can see the same over-training, all of the same behavior in learning curves. And another version is maybe you have a completely deterministic world — maybe it’s completely predictable, even — it’s just that you don’t observe the full input. So imagine that, you know, the teacher has a hundred input nodes, whereas the student only has 50. Now the remainder looks like unpredictable, uh, information from the perspective of the student again, and that will have all the same properties as if it was really just a noisy environment. So there are several forms of unpredictability that behave similarly and, um, would require the same regulation, uh, and transfer between brain areas.  </p>



<p>Weinan    01:07:55    Yeah. So just, I want to add to that — maybe I was just trying to really think about this intuitively, with these silly, silly examples. So for example, the noisy teacher: you can just imagine, like, someone going to the casino in Vegas and then playing the roulette, and then picking the number 27 and betting a lot of money on it. And he lost, he lost like $10,000. And then the unregulated consolidation will be that you’ve treated number 27 as a bad number forever. Like, you just learned that I should never pick number 27, despite that it’s kind of random. And that could be detrimental to your future. For example, if you are dating a girl whose birthday is on August 27th, and you said, oh, no, no, no, I’m not going to date this girl. So that’s going to be bad for generalization.  </p>



<p>Weinan    01:08:43    And the second example is about the complex teacher. Like this is really give me that insight on, you know, why a lot of episodic memories are being kept in the hippocampus and require the hippocampus. They just, a lot of it is because the world is so complex. Just you just imagine you’re on a street, there are different things happening. And, uh, a lot of them are independent processes has its own cause and effects and to cross predict such like complex interactions between so many things, it’s generally impossible for our human brain to do. And like intuitively example is that a lot of times in movies, you see like a tragic, like a tragedy, like a part of the scene is lowest of the low, like the person got a cancer and then hit by a truck and there’s something else happened. And I, the actor just start to cry, like a why, why me?  </p>



<p>Weinan    01:09:38    I think at those times it might not be beneficial to really consolidate, consolidate such complex events. Like, it’s better to remember those things, but if you overgeneralize from those complexities, it’s going to hurt generalization. And the last thing, about partial predictability, partial observability, is — I mean, a lot of, like, most of the time our perceptual access, uh, to certain events is really limited. Like, people always say, like, the traditional wisdom is that, uh, you should really put things into context and don’t just judge things by the first glance. Like, someone is behaving in a certain way and you get offended, and maybe that person is having a really, really bad day. You know, you can’t just conclude based on that partial observation that this guy is just a grumpy person and he’s not friendly. Maybe you just keep that as an episodic memory, and maybe you can build up a more accurate representation of this person by long-term interactions. So I think about those three types of unpredictability this way.  </p>



<p>Paul    01:10:41    Can I throw one more concept into the mix? Uh, this seems to me related somehow to concepts like Herbert Simon’s satisficing and bounded, rationality, and Kahneman, and Tversky’s heuristics the use of heuristics and good enough, uh, scenarios. Have you guys thought about how your work and the results and implications of your work overlaps with those sorts of concepts? So,  </p>



<p>Andrew    01:11:10    Yeah, bounded, rationality it’s because we kind of are assuming, you know, we have this Oracle assumption, you know, that predictability, we’re optimizing all parameters at the setting, but we’ve constrained the system to be this particular neural network with inherent limitations in that. So, yeah. I mean, I wonder if that is a version of, I guess it’s like bounded architectural rationality, you know, there’s like something baked into the architecture that you just, it only is going to take you so far. And, um, and in terms of heuristics, I mean, yeah, I guess you could maybe view it, uh, similarly that you, you may be forced into a simpler solution to what is actually a complex problem, just because of the resources that the student actually has available to it. But I don’t, yeah. I haven’t really used in connection  </p>



<p>Paul    01:12:02    Jammed. It looked like you were going to add something.  </p>



<p>James    01:12:05    No, I think that was a good answer. I wasn’t going to add anything more than that. I was just going to kind of bring up the same points that this notion of unpredictability, as it relates to approximation, or is kind of giving the idea that the cortical network may only able be able to do so well, and that could have to do with the architecture of the network. It could have to do, for example, with what it’s able to learn, the learning mechanisms involved in that network. And so it is a notion of bounded rationality, I think for sure. But then how closely that would relate to the more famous notion of bounded rationality, I think is a very interesting and deeper question that I think is harder to kind of answer at the moment.  </p>



<p>Paul    01:12:47    Yeah. Because, uh, here, one of the great things about heuristics, although they, they fail in many ways, but they’re also beneficial in many scenarios. And that’s kind of why I was wondering, because, you know, you have to have this, uh, predictability estimator and it needs to be beneficial for the organism. And then I was thinking, you know, heuristics for all their failures are also very beneficial in certain scenarios. So yeah,  </p>



<p>James    01:13:09    I mean, I think you could view whatever it is that the student learns in our framework as a heuristic, because it is going to be an incomplete and inaccurate to some degree representation of what the teacher actually is. But as you said, this heuristic is very useful and in our setting, it’s very useful in a very precise mathematical sense that it nevertheless optimize a generalization, given the bounded rationality possible for that system. And so it kind of brings these two things together, um, that, you know, if you have some sort of bounded rationality or some sort of limitations in terms of what the system can do, then, you know, obviously you can’t do better than that, but then the heuristic may be the best possible thing.  </p>



<p>Paul    01:13:51    But so you guys put this in terms you’re careful not to just map on the networks to the brain areas, to hippocampus and cortex and the environment or perceptual cortex, for instance, uh, how should I think about this? Should I think about this as theoretical neuroscience? Should I think about it as, um, artificial intelligence work and then what does it imply? Because there are, you know, like, uh, what we alluded to earlier, there are networks already with external memory, uh, there are metal learning networks, so what could deep learning and, or, uh, AI in general take, um, from, from these networks?  </p>



<p>Weinan    01:14:29    Yeah. So yeah, like you mentioned, there are a lot of memory augmented neural networks out there, and also memory based RL agents. Typically it’s coupling some kind of cortical module, like, uh, an LSTM, uh, to, uh, an external memory. So typically the memory is fairly simple. So a lot of times it’s just appending each new experience as, like, an additional row — so it’s kind of appending into a big matrix. And a lot of times there are keys and values: like, you use the keys to search and to retrieve a softmax average, uh, output as your episodic memory. And that has been very successful in certain problems — like the Neural Turing Machine or Differentiable Neural Computer work from DeepMind can solve vastly different problems than the traditional neural networks. Uh, like, Greg Wayne’s Merlin framework also can solve the water maze task that a typical LSTM cannot perform. But I think there are inspirations we can take from the architecture.  </p>



<p>Weinan    01:15:35    I think instead of like first thing, if that is still like the experienced rate play in our, our agents and the usage of online, like a memory, excellent memory modules, those two are different things that a different module to doing the memory storage and doing the online inference. Now in the mammalian brain, we think the, we use the hippocampus to predict the future, but we also have evidence that the hippocampus is replay and, uh, serving as experienced replay buffer to train up the neocortex. So maybe there’s a vantage of a merging the two modules together. So that’s one direction. And the second direction is that how exactly, uh, should be the memory representations in X nor memories like he, instead of just appending different rows, uh, can we use the like more spars distributed representation and, uh, uh, biologically realistic retrieving rule for memory retrieval?  </p>



<p>Weinan    01:16:31    So there’s actually a very interesting work — uh, I think at the end of last year, there’s a group showing how a Hopfield network, like a modern continuous Hopfield network, is equivalent to the transformer self-attention mechanism. So there’s some deep connections here. Maybe, like, memory and attention are really, like, different aspects of, of the same thing. So I think using some kind of a hippocampus inspired architecture, um, maybe there’s some certain research direction that’s got to be uncovered — I don’t know exactly what yet. And also, I think the last thing about the memory module is, we use a Hopfield network and that’s fairly traditional. And the current AI external memory modules, they use, like, more advanced versions of those generative models, like variational autoencoders or, uh, GANs. So, uh, one insight that might inspire AI is that, you know, we know the anatomical connections between the hippocampus and, uh, the rest of the cortical areas, and also the PFC. And typically people doing variational autoencoder work assume there is an input going into, uh, a series of hierarchies and arriving at the hippocampus. At the hippocampus,  </p>



<p>Weinan    01:17:59    it sort of tries to encode a latent representation from which it can reconstruct that input, to reduce the reconstruction error. But maybe there is a way to improve this. I just realized that, you know, there is, uh, an architecture called VAE-GAN — so it’s a VAE and a GAN connected together. So the idea is that instead of reconstructing the original input, you send your reconstruction to, for example, the PFC, and the PFC serves as a critic, like the discriminator in a GAN, and feeds back the signal of whether this is realistic or not. And this is a plausible assumption to make, because, like, a lot of patients with schizophrenia — if they have lesions in the PFC, they cannot tell the difference between imagination and the real world. So it might be true that the reconstruction from the hippocampus is sent to multiple modules to compute different cost functions — for example, it can be pure reconstruction, or it can be, uh, a discriminator’s function — to best reproduce that sensory stream. So I think, I think some architectures like this, like a multi-head, uh, generative model as external memory, will be very interesting.  </p>



<p>Paul    01:19:18    W where different parts of cortex serve as different modules for different, uh, well, like you said, cost functions is in the vernacular of AI, but, uh, purposes, I suppose, in the vernacular of organisms. Yes.  </p>



<p>Weinan    01:19:31    Yeah.  </p>



<p>Andrew    01:19:32    Yeah. I mean, I also think there is maybe not as, uh, exciting lessons to be learned from this work for AI. Like, uh, if you build a continual learning system that does store its own examples and manage its own learning, then it’s going to have to regulate you not to replay. That’s a very simple point, but I do think that’s probably something that will start emerging. And it’s an interesting, broader question. How do you decide when to learn and how to learn or like ultimately agents? We, we decide, oh, this was a learning episode. I’m going to store that. Presumably how do you manage those  </p>



<p>Paul    01:20:16    Because in the original complementary learning systems, more replay is always better, right? And that’s one of the take homes here, is that you have to regulate that, and knowing when to regulate it is a pretty important factor.  </p>



<p>Weinan    01:20:29    Yeah, yeah. Also with that point, uh, I think more than more than RL algorithms, it’s kind of having like a lot of problems of generating, uh, generalizing to new tasks, like train on one game and a test on the unseen levels of the other game. There are a lot of effort improving this, but I think those agents will be benefited by having a different volunteer with that specifically as to mate the predictability of the captured experience used to have like replaying or train all possess your data. You only train based on the score of that predictor, like, so you can actually filter through your experiences only generate generalize the useful components. So maybe that will help our out as well.  </p>



<p>Paul    01:21:16    So one of the recurring themes these days is that AI is moving super fast and neuroscience, especially experimental neuroscience because experimental science in general is very slow. But, uh, even, uh, I think theoretical neuroscience is kind of lagging behind the progress of the engineering in AI, right? And so what this is, is theoretical neuroscience, at least partly, right. We talked about how it’s kind of a mixed bag of things, but do you see this kind of work theoretical neuroscience more broadly as being able to, so backing up what, what the implication of that rate of progress means is that right now and for the past 10 years or so, AI has been informing neuroscience a lot more and the direction of the other arrow from neuroscience to AI is slow or lagging or lacking. Do you guys see theoretical neuroscience as a way to, um, bring more influence from neuroscience into AI?  </p>



<p>James    01:22:13    Yeah, I mean, I’m curious to hear what everybody else has to say, but for me, I definitely do. Because I think that, you know, it may be slow, but I think that theoretical neuroscience — kind of really rigorously working out how individual models work and how they relate to the biological brain — I think that that provides kind of fundamentally new and fundamentally, uh, robust conclusions than you can get from just kind of numerical experimentation on very large AI systems. And so I definitely think that, you know, we wouldn’t have gotten to where we currently are in AI without past generations of theoretical neuroscience research. And I also definitely think that, you know, projects like this, where we try to kind of boil it down to the essentials and really analyze everything very rigorously and really try to figure out to what extent this relates to the biological brain, will provide useful seeds for future AI research.  </p>



<p>Andrew    01:23:06    What do you guys think? Do you guys agree with that? Yes. I mean, I think James and I see very similarly on this it’s the timescale may be long, but ultimately I think theoretical neuroscience and psychology just have an enormous role to play in driving AI, but you have to be willing for that impact to happen many years down the line, but just imagine, I mean, for instance, deep learning, right? The whole thing. Um, the fact that we’re using neural networks, those all came from contributions that were worked out in dialogue with our scientists. And if what theoretical neuroscientist were doing right now today, 50 years from now at a similar impact, wouldn’t that be amazing. It’s something that everyone would want to work towards. So, and that’s how I really do think that’s possible. You know, we don’t know what those it’s more in Kuwait, it’s more uncertain. We don’t know what these principles will be, that will guide us towards even better systems, but having the insights from the brain, guiding, um, the inquiry and showing us some of the problems that maybe we didn’t even realize were problems, uh, is really valuable.  </p>



<p>Paul    01:24:13    But my perception is that the bro, the in large part, the majority of folks working in the AI world, what they would say is, well sure, theoretical neuroscience may eventually, uh, provide us with something, but by that time, our systems are going to be so advanced that it won’t matter, right. Because we’ve already, uh, basically accomplished that. And I mean, I think that that personally my personal opinion, it’s just an opinion, uh, is, uh, silly. Do you think that that’s right, that the AI world thinks that, and is it comforting to know that the mass majority is wrong? Or how, how do you think about that?  </p>



<p>Andrew    01:24:49    I, I do think it’s right to say that that’s a widely held opinion. I’ve had AI people telling me, like, why do you even do mathematical theory? Because eventually I’m going to make a theorem prover that’s going to do the mathematical theory and explain it back to us. So, you know, just focus on getting the AI system to work. We won’t know until — the proof will be in the pudding. My own opinion is that it’s going to turn out that today’s AI systems, as fantastic as they are, will hit roadblocks. And part of getting them unstuck from those roadblocks will be looking again to the brain, just as happened with deep learning. Again, I mean, I think this history often gets kind of run roughshod over, but the paper on backpropagation — the first author is David Rumelhart, who was a psychologist. The first paper on the perceptron — that’s Frank Rosenblatt; he’s a psychologist.  </p>



<p>Andrew    01:25:43    Um, the contributions. And, and I remember as an incoming graduate student, the paper that inspired me towards deep learning — and I’m sure it’s different for different people; there were AI people working on it — but for me, it was Tomaso Poggio’s work with Max Riesenhuber, and they’re neuroscientists, theoretical neuroscientists. The reason why they were interested in these convolutional network architectures, even though the rest of the field — it’s hard to remember, but the rest of the field was not; it was totally different — the reason why they were interested is because they kept looking at the brain and saying, like, this is what we see in the brain; somehow it has to work. And so I think that, um, that promise is still there for the future. And if you think about some of the topics, like theory of mind, um, some of these ideas that have come from cognitive science — again, ultimately causal reasoning — all of these things have been pointed to by cognitive scientists. And now we’re seeing that yeah, they really are important and they require their own methods to address. Yeah. So I guess I think it will continue to be important going forward.  </p>



<p>Weinan    01:26:53    I just want to add on that. I totally agree with both James and Andrew. I think neuroscience still has a lot to offer to AI, and especially the exciting new direction — like, um, Yoshua Bengio has proposed this idea that we should learn from human cognition or animal cognition, all the system-two level cognition. So compared to the direct perceptual classification tasks, it’s like you have long-term deliberate planning and reasoning — you know, you have to think about things — and those abilities are not well captured yet in the current models. And one of the solutions I think Yoshua Bengio was proposing is there’s some kind of attentional, so-called sparse causal graph. That is a good world model. And you form these causal nodes, uh, through learning — whether by semi-supervised learning or by other methods. And you form these causal nodes and you can actually reason within that world model.  </p>



<p>Weinan    01:27:55    And the key thing — I think just one of the key things — is there’s a recent debate about whether to learn things end to end, or just have built-in structures on the other side: like, whether to use deep learning or to use good old fashioned AI, mainly symbolic processing. I think there is a trend now that it is actually beneficial to learn the symbolic or abstract representations of entities in the world. It’s an old topic in psychology, but more recently — especially work by Randall O’Reilly and by Jonathan Cohen’s group from Princeton — they show that if you couple an LSTM controller to an external memory and only manipulate the memories indirectly through the keys of the memories, then through learning, through many, many simulations, you can form these symbolic representations in the keys. So I think that’s kind of a key insight offering, like, how the brain might be generating those abstract representations.  </p>



<p>Weinan    01:28:55    And it could be in like a hypothetical, like structure that the cortex and the sensory and coding actually are bound together through fast plasticity. And the cortex can learn to manipulate those memories. And eventually symbols are generated by this manipulation. So this is kind of going from sensory generalization to the next level, the out, out of sample distribution. Um, and, uh, it’s like a higher systematic level generalization idea that have chemists might also offer insights into this process. So I think this system two level combination is really like what is needed for a more, more powerful, or even like a more humanlike agency in the future.  </p>



<p>Paul    01:29:41    Okay, guys, this has been a lot of fun. Uh, I want to wrap up, let’s do one round of what kept you up last night. What were you thinking about? That’s just at the edge of your knowledge. And I know that this is a really more of a question for the beginning of a conversation, but something may be unrelated since I know you all have different lives outside of this, uh, topic that we’ve been talking about a wane on. Can you tell me what, uh, what’s been troubling you lately that you can’t quite figure out just beyond your reach?  </p>



<p>Weinan    01:30:13    Um, yeah. A lot of things, to be frank. Yes. So I think, reflecting back on the collaboration, I think there are really two things that really troubled me — maybe just due to the general sense of, uh, like, being inadequate for so many years. So, so being an experimentalist, interacting with Andrew and James, just the first few years, I, I found this interdisciplinary work to be extremely challenging. And, uh, you know, I was stepping outside of the synaptic transmission world, single cell computations, to cognitive science. Like, I had never heard of retrograde amnesia curves before, to be frank. And, uh, I was surprised to find out, okay, there is so much diversity — I thought these were all, like, really figured out. And I was just diving into this research. And, uh, and also on the other side, I was diving into the mathematics and the modern machine learning. It’s really trying to learn multiple things, and that has been extremely difficult, but also rewarding. So still, that poses a challenge to me. I think if you try to learn multiple things at the same time — I mean, I’m just barely feeling that I can barely keep up with the minimum amount of knowledge happening in these fields. So I, I still don’t know how to deal with that. Maybe more collaborations, but, you know, a human only has a certain amount of hours during the day. So how do you read so many papers while keeping up?  </p>



<p>Paul    01:31:47    Yeah. You can’t read so many papers, so there’s, isn’t there a perfect interleaving of time spent on various things going back and forth. And have you figured that out because I don’t know it  </p>



<p>Weinan    01:31:58    That’s going to be the next model  </p>



<p>Paul    01:32:01    Learning strategies.  </p>



<p>Weinan    01:32:03    So I think the second thing just real quickly is really like this process, like with Andrew and teens. I mean, I think I talked me about the, you know, really the benefit of really talking to people in different fields. And that’s kind of obvious now, but, uh, but in the beginning was not easy. Like I felt like being an experimentalist, James and Andrew had to explain things very carefully from very basic level knowledge to build me up. I think I really appreciate that. But also like through interactions we found like including, uh, my current advisor Nelson, who are also at experiment lists, like the communication between theory and experiment are extremely, like, could be challenging at times because we spoke different languages that, for example, like even games and Andrew, like the idea of generalization in machine learning is well appreciated and that has entirely different meaning in the experiment experimenters mind.  </p>



<p>Weinan    01:33:01    So I think that communication is hard. And also, like I talk about experiment, details that huge complexity in experiment neuroscience. I think that sometimes we just assume people know this knowledge and to know this complexity, that’s not true neither. Right. So I think the natural tendency is really like, okay, I just gave up, we theorists works together because we talk well to each other and experiments work together because you know, it’s really effective communication, but I think what’s what I learned is it’s really important to be patient and to really try to understand each other. Uh, so I think that still is challenging science communication is super important, important for multidisciplinary research.  </p>



<p>Paul    01:33:44    Is there a role of predictability in this complimentary collaborative learning a whole organism system system? Sorry, I just, I couldn’t help the analogy to go CLS here, but yeah, no, I was joking about the predictability. All right. Well, that’s great. Um, Andrew, do you want to, uh, chime in anything bothering you, uh, last night that you couldn’t, that you are frustrated, you just can’t figure out, et cetera?  </p>



<p>Andrew    01:34:13    Well, one very, this is very specific, deep learning theory, but I actually think it’s quite important when you look at the brain, you see lots of modules, you see this complex interconnectivity of sort of as a scale of modules. And we all think that those modules are kind of specialized for function and kind of not, and it’s distributed representations, but also not. And somehow this all is important for generalization and systematic reasoning and cognitive flexibility. And I want to understand why, and it’s still very much bothers me that if you don’t have a theory, but maybe to build on one of Wayne’s points to another thing, which keeps me up at night is whether this theory that we proposed will in fact, be tested by an experiment. I thought you  </p>



<p>Paul    01:34:57    Guys had, um, begun sort of the early stages of thinking about how to act. I was wondering if you had already begun, but I knew that you’re thinking about how to test it.  </p>



<p>Andrew    01:35:07    Yeah. And, and if anyone could do it, it would be weighed on and Nelson’s lab is perfect for it. I have however, been in the beginning stages of many experimental tests that have not finally panned out. So, you know, it’s just, I think like when I was saying it’s, it’s hard to cross these communities. And one of the things that I would love to see is how we can create this feedback loop and virtuous cycle and actually get it functioning on all cylinders to sort of, you know, make the theories concrete enough that they can be falsified and make those experiments actually happen using all the amazing methods that we have now.  </p>



<p>Paul    01:35:44    All right. So James, I’m sorry. We ran out of time and can’t include you. No, I’m just kidding. James, do you have something that, uh,  </p>



<p>James    01:35:51    Sure. I think it’s actually super interesting that, you know, both way naan and Andrew highlighted the difficulty of how hard it is actually to kind of really get these feedback loops going really robustly. And I agree with that and that really does, you know, often keep me up last night. I mean, I guess in reality last night there was a big election in Virginia. So maybe that wasn’t what was keeping me up at night, but many nights I may have been what keeps me up, but I think maybe also related to that, I think, you know, and also maybe going back to a theme of earlier about, you know, the, the value of kind of abstract models. Like I think that actually when I I’m often kept up at night by science, it’s often because it’s a real concrete math problem that actually keeps me up at night most often, because I feel like when you get it to that level, you can think about everything so crisply and you can really get the sense of, you know, I’m really about to solve this problem.  </p>



<p>James    01:36:36    Like if I just think about this a little bit longer, I’m going to have that solution. And so actually there another one there, and maybe I’ll give a little plug. I completely unrelated to this work. Uh, I’m collaborating with, uh, two of the very best while such Janelia when trying to analyze the links between structure and function. I know one that works using geometry and we’ve had a lot of really interesting conversations over the last week where I’ve learned a lot mathematically about like, how to think about this problem. And that’s what keeps me up at night in the positive way. Maybe the negative way is worrying. Like, are we communicating clearly enough that we’re going to be able to kind of break down these barriers, but maybe also just highlight that sometimes you stay up because you can’t bear to go to sleep. And the solution to the math problem is so cool.  </p>



<p>Paul    01:37:14    Is the desire for the solution to the math problem due to your background in physics? Or is it just, uh, uh,  </p>



<p>James    01:37:21    I think I, I would actually flip it. I think actually my background in physics is probably due to my desire to sell math problems. And I think that this is why it gets so hard, I think, to cross these boundaries, because I think that, you know, fundamentally probably why different scientific communities, like why their individuals went into science could actually differ. I mean, for me, it probably really is that the beauty of like solving a math problem, but you know, for many other scientists who have a lot of valuable expertise to lend me, that’s not why they went into science and they went into science for completely different reasons. And so then how do we kind of, you know, not only communicate to each other, to help them understand what we understand, but also as well, what we’re motivated by and go to Andrew’s point about like, okay, well, why don’t many theories get tested?  </p>



<p>James    01:38:05    And I think that a lot of times it happens because the motivations of the theorist and the motivations of the experiment was actually not the same. And so, you know, the theorist may be like, oh, I don’t understand why you don’t want to test this. But then the experiment, I was like, I don’t know why you keep on talking to me about this Boeing theory. That’s all this other stuff going on. Like, you know, I just found this crazy thing, like look at what actually came out of my experiments. And I think that like, you know, getting those motivations aligned, I think is another huge part of what will eventually be needed. I think, to kind of close these disciplinary divides.  </p>



<p>Paul    01:38:32    Oh, it’s a challenge. Um, all right, James. Well, uh, I’m gonna let you go do some beautiful math guys. Thanks for talking to me for so long. It’s really cool work. And I appreciate your time here today.  </p>



<p>James    01:38:43    Thanks for having us. This was a lot of fun. Thank you. Paul  </p>

</div></div>


<p>0:00 – Intro<br />3:57 – Guest Intros<br />15:04 – Organizing memories for generalization<br />26:48 – Teacher, student, and notebook models<br />30:51 – Shallow linear networks<br />33:17 – How to optimize generalization<br />47:05 – Replay as a generalization regulator<br />54:57 – Whole greater than sum of its parts<br />1:05:37 – Unpredictability<br />1:10:41 – Heuristics<br />1:13:52 – Theoretical neuroscience for AI<br />1:29:42 – Current personal thinking</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/182c98b5-6e8f-41c3-a3c7-2054e20b5786-120-James-Fitgerald-Andrew-Saxe-Weinan-Sun-public.mp3" length="96328657"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











James, Andrew, and Weinan discuss their recent theory about how the brain might use complementary learning systems to optimize our memories. The idea is that our hippocampus creates our episodic memories for individual events, full of particular details. And through a complementary process, slowly consolidates those memories within our neocortex through mechanisms like hippocampal replay. The new idea in their work suggests a way for the consolidated cortical memory to become optimized for generalization, something humans are known to be capable of but deep learning has yet to build. We discuss what their theory predicts about how the “correct” process depends on how much noise and variability there is in the learning environment, how their model solves this, and how it relates to our brain and behavior.



James’ Janelia page.Weinan’s Janelia page.Andrew’s website.Twitter: Andrew: @SaxeLabWeinan: @sunw37Paper we discuss:Organizing memories for generalization in complementary learning systems.Andrew’s previous episode: BI 052 Andrew Saxe: Deep Learning Theory


Transcript

Andrew    00:00:04    I guess the jumping off point is this long running debate about where memories are stored in the brain. And it’s a profound question is, you know, something that’s, people have struggled with for many decades at this point.  



Weinan    00:00:17    Like this is really give me that insight on, you know, why a lot of episodic memories are being kept in the hippocampus and require the hippocampus. It’s just a lot of it is because the world is so complex.  



James    00:00:32    I definitely think that, you know, we wouldn’t have gotten to where we currently are in AI without past generations of theoretical neuroscience research. And I also definitely think that projects like this, where we try to kind of boil it down to the essentials and really analyze everything very rigorously and really try to figure out to what extent is relates to the biological brain will provide useful seeds for future AI research.  



Speaker 0    00:01:01    This is brain inspired.  



Paul    00:01:14    Hey everyone, it’s Paul today. I have three fine folks on the podcast: James Fitzgerald, Andrew Saxe, and Weinan Sun. James and Weinan are both at the Janelia research campus...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:40:02</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 119 Henry Yin: The Crisis in Neuroscience]]>
                </title>
                <pubDate>Thu, 11 Nov 2021 17:56:33 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-119-henry-yin-the-crisis-in-neuroscience</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-119-henry-yin-the-crisis-in-neuroscience</link>
                                <description>
                                            <![CDATA[
<hr class="wp-block-separator" />



<hr class="wp-block-separator" />



<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/11/art-119-01.jpg" alt="" class="wp-image-1511" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/11/yinnew.jpg" alt="" class="wp-image-1512" width="240" height="320" /></div>



<p>Henry and I discuss why he thinks neuroscience is in a crisis (in the <a href="https://www.amazon.com/gp/product/0226458121/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0226458121&amp;linkId=c85752df380add9cf158431c769a8f2b">Thomas Kuhn</a> sense of scientific paradigms, crises, and revolutions). Henry thinks our current concept of the brain as an input-output device, with cognition in the middle, is mistaken. He points to the failure of neuroscience to successfully explain behavior despite decades of research. Instead, Henry proposes the brain is one big hierarchical set of control loops, trying to control their output with respect to internally generated reference signals. He was inspired by control theory, but points out that most control theory for biology is flawed by not recognizing that the reference signals are internally generated. Instead, most control theory approaches, and neuroscience research in general, assume the reference signals are what gets externally supplied… by the experimenter.</p>



<ul><li><a href="https://www.neuro.duke.edu/research/faculty-labs/yin-lab">Yin lab</a> at Duke.</li><li>Twitter: <a href="https://twitter.com/HenryYin19">@HenryYin19</a>.</li><li>Related papers<ul><li><a href="https://www.researchgate.net/publication/341706555_The_crisis_in_neuroscience">The Crisis in Neuroscience</a>.</li><li><a href="https://www.researchgate.net/publication/277721485_Restoring_Purpose_in_Behavior">Restoring Purpose in Behavior</a>.</li><li><a href="https://www.sciencedirect.com/science/article/pii/S2589004221009160">Achieving natural behavior in a robot using neurally inspired hierarchical perceptual control</a>.</li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Henry    00:00:03    So how come neuroscientists have failed so badly yet explaining behavior? So obviously they’d been trying to do this for at least a century and a lot of smart people have worked very hard, but the result is I would say very disappointing. I think it’s healthy in the sense that, okay, so there are people who are perhaps mainstream and they believe that the paradigm is healthy. You know, they, they, they want to maintain the status quo, others, you know, people like me perhaps in the minority, but, um, we think there’s a crisis. Um, uh, we would like to start a revolution. Uh, I think it’s exciting and I’m quite optimistic.  </p>



<p>Speaker 0    00:00:54    This is brain inspired.  </p>



<p>Paul    00:01:08    Hello, it’s Paul. And today I bring you Henry in Henry runs his lab at duke university where he studies learning and behavior in rodents using techniques like optogenetics and electrophysiology. But that’s not why he’s on the podcast today. He’s on the podcast because he’s written a few pieces in which he argues that we need a new paradigm in neuroscience to explain behavior that essentially we are barking up the wrong tree, trying to study the brain like an input output device, which creates representations of objects in the world, and then programs the body to act accordingly. Instead, Henry looks to control theory and suggest that the brain...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Support the show to get full episodes and join the Discord community.











Henry and I discuss why he thinks neuroscience is in a crisis (in the Thomas Kuhn sense of scientific paradigms, crises, and revolutions). Henry thinks our current concept of the brain as an input-output device, with cognition in the middle, is mistaken. He points to the failure of neuroscience to successfully explain behavior despite decades of research. Instead, Henry proposes the brain is one big hierarchical set of control loops, trying to control their output with respect to internally generated reference signals. He was inspired by control theory, but points out that most control theory for biology is flawed by not recognizing that the reference signals are internally generated. Instead, most control theory approaches, and neuroscience research in general, assume the reference signals are what gets externally supplied… by the experimenter.



Yin lab at Duke.Twitter: @HenryYin19.Related papersThe Crisis in Neuroscience.Restoring Purpose in Behavior.Achieving natural behavior in a robot using neurally inspired hierarchical perceptual control.


Transcript

Henry    00:00:03    So how come neuroscientists have failed so badly yet explaining behavior? So obviously they’d been trying to do this for at least a century and a lot of smart people have worked very hard, but the result is I would say very disappointing. I think it’s healthy in the sense that, okay, so there are people who are perhaps mainstream and they believe that the paradigm is healthy. You know, they, they, they want to maintain the status quo, others, you know, people like me perhaps in the minority, but, um, we think there’s a crisis. Um, uh, we would like to start a revolution. Uh, I think it’s exciting and I’m quite optimistic.  



Speaker 0    00:00:54    This is brain inspired.  



Paul    00:01:08    Hello, it’s Paul. And today I bring you Henry in Henry runs his lab at duke university where he studies learning and behavior in rodents using techniques like optogenetics and electrophysiology. But that’s not why he’s on the podcast today. He’s on the podcast because he’s written a few pieces in which he argues that we need a new paradigm in neuroscience to explain behavior that essentially we are barking up the wrong tree, trying to study the brain like an input output device, which creates representations of objects in the world, and then programs the body to act accordingly. Instead, Henry looks to control theory and suggest that the brain...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 119 Henry Yin: The Crisis in Neuroscience]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<hr class="wp-block-separator" />



<hr class="wp-block-separator" />



<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/11/art-119-01.jpg" alt="" class="wp-image-1511" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/11/yinnew.jpg" alt="" class="wp-image-1512" width="240" height="320" /></div>



<p>Henry and I discuss why he thinks neuroscience is in a crisis (in the <a href="https://www.amazon.com/gp/product/0226458121/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0226458121&amp;linkId=c85752df380add9cf158431c769a8f2b">Thomas Kuhn</a> sense of scientific paradigms, crises, and revolutions). Henry thinks our current concept of the brain as an input-output device, with cognition in the middle, is mistaken. He points to the failure of neuroscience to successfully explain behavior despite decades of research. Instead, Henry proposes the brain is one big hierarchical set of control loops, trying to control their output with respect to internally generated reference signals. He was inspired by control theory, but points out that most control theory for biology is flawed by not recognizing that the reference signals are internally generated. Instead, most control theory approaches, and neuroscience research in general, assume the reference signals are what gets externally supplied… by the experimenter.</p>



<ul><li><a href="https://www.neuro.duke.edu/research/faculty-labs/yin-lab">Yin lab</a> at Duke.</li><li>Twitter: <a href="https://twitter.com/HenryYin19">@HenryYin19</a>.</li><li>Related papers<ul><li><a href="https://www.researchgate.net/publication/341706555_The_crisis_in_neuroscience">The Crisis in Neuroscience</a>.</li><li><a href="https://www.researchgate.net/publication/277721485_Restoring_Purpose_in_Behavior">Restoring Purpose in Behavior</a>.</li><li><a href="https://www.sciencedirect.com/science/article/pii/S2589004221009160">Achieving natural behavior in a robot using neurally inspired hierarchical perceptual control</a>.</li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Henry    00:00:03    So how come neuroscientists have failed so badly yet explaining behavior? So obviously they’d been trying to do this for at least a century and a lot of smart people have worked very hard, but the result is I would say very disappointing. I think it’s healthy in the sense that, okay, so there are people who are perhaps mainstream and they believe that the paradigm is healthy. You know, they, they, they want to maintain the status quo, others, you know, people like me perhaps in the minority, but, um, we think there’s a crisis. Um, uh, we would like to start a revolution. Uh, I think it’s exciting and I’m quite optimistic.  </p>



<p>Speaker 0    00:00:54    This is brain inspired.  </p>



<p>Paul    00:01:08    Hello, it’s Paul. And today I bring you Henry in Henry runs his lab at duke university where he studies learning and behavior in rodents using techniques like optogenetics and electrophysiology. But that’s not why he’s on the podcast today. He’s on the podcast because he’s written a few pieces in which he argues that we need a new paradigm in neuroscience to explain behavior that essentially we are barking up the wrong tree, trying to study the brain like an input output device, which creates representations of objects in the world, and then programs the body to act accordingly. Instead, Henry looks to control theory and suggest that the brain is basically one big hierarchical set of control loops, each of which is trying to control its input so that the input matches a set of internal reference signals. So control theory came out of the early cybernetics work, but Henry argues that they made a key mistake due to their engineering like approach.  </p>



<p>Paul    00:02:10    And the mistake was that they failed to consider that the reference signal, the signal that the system needs to control itself to obtain, is generated internally by our autonomous biology. Instead, the cybernetics approach — and much of the rest of neuroscience, Henry argues — places the reference signal outside of the body in the hands of our experimental control, but we need to be looking inside. Okay. So that will make more sense when Henry explains it more. And I linked to his work in the show notes at braininspired.co/podcast/119, where you can join the awesome group of Patreon supporters as well. Thank you, Patreon supporters. Also, just a quick announcement: I am finally going to be releasing my brain inspired course, which is all about the conceptual landscape of neuro AI. So all the topics that we talk about on the podcast, but in the form of a series of video lessons.  </p>



<p>Paul    00:03:05    So next week on my website, I’ll be releasing a limited time video series. And in those three short videos, I’ll discuss why this marriage between neuroscience and AI is so exciting. I’ll show some examples of what to expect from the course, some of the topics that the course contains and then the details of the full content of the course and how to purchase it. But the videos will only be available from November 17th to November 20th. So if you’re interested head to brain inspired.co/bi-workshop, like brain inspired dash workshop B I dash workshop. And I’ll put that link also in the show notes for this episode. And it’s during that time span, November 17th through 20th, that the course will actually be available to purchase while the video series is live. So check that out if it sounds interesting. All right. Here’s Henry Henry. Thanks for being here. So I know that, um, by day you are not a crisis counselor, but, uh, the main topic I suppose, that we’re going to talk about today is, comes from a chapter that you wrote, um, in a book on perceptual control theory and the title of the chapter is the crisis in neuroscience.  </p>



<p>Paul    00:04:15    Um, so there’s a lot to talk about actually, it’s really interesting stuff, but, uh, by day, uh, can you tell us a little bit about what you do on, in the empirical side?  </p>



<p>Henry    00:04:25    Uh, yes, by day I’m a systems neuroscientist. Uh, I work on the role of the basal ganglia circuits and behavior, um, in particular instrumental goal-directed behavior. And I use mice for the most part in my research. Oh yeah. So I’m an experimental neuroscientist.  </p>



<p>Paul    00:04:47    Was it your research that brought you to think about these things that you write about you’ve written been writing about these topics since at least 2013? I’m not sure if you wrote about it earlier as well.  </p>



<p>Henry    00:05:01    Uh, yeah, I’ve been thinking, I’ve been thinking about these topics for probably, uh, since graduate school. So for probably 20 years or so. And I started writing about, um, uh, as you said in 2013 or a little earlier than that. Um, yeah, so it’s been awhile.  </p>



<p>Paul    00:05:25    Okay. Well, uh, let’s not wait any longer than, um, what is, and, and there’s a lot to unpack, so I don’t expect you to summarize the entire chapter here, but can you give us the overall picture of what the crisis in neuroscience is that you write about?  </p>



<p>Henry    00:05:40    So I used the word crisis, um, in a Kuhnian sense, that is, based on, um, Kuhn’s book The Structure of Scientific Revolutions. So the idea is that as we all know, there’s, um, uh, a scientific paradigm, which is a set of common assumptions that most scientists in the field, uh, take for granted. And, um, when you have a crisis, uh, it’s usually due to discrepancies between, uh, new observations and the accepted model, uh, these assumptions that everybody accepts. So then the idea is that you can either maintain the status quo or you can start a scientific revolution. Um, so that sort of the, the nature of the crisis in general for, um, in science and, uh, the question that I raised in the chapter was so how come neuroscientists have failed so badly at explaining behavior? So obviously they’ve been trying to do this for at least a century, and a lot of smart people have worked very hard, but the result is I would say very disappointing after a century of work, there is no accepted model of any behavior.  </p>



<p>Henry    00:07:14    Um, the things that we have learned about the brain don’t seem to explain how behavior works. So that is, um, disappointing and surprising in my opinion. And, um, so I think that’s basically the crisis. Um, um, I think the reason of course is not because the brain is too complicated. It’s not because as people normally say, you know, the brain is the most complex object in the universe and therefore it will take forever to understand how it works. I don’t think that’s the reason, although that’s the, you know, the common excuse. So I think the problem is that the, the, the accepted paradigm in neuroscience and in psychology is wrong. And I will call this paradigm, the, the linear causation paradigm in which essentially you accept that the organism X receives inputs and generates outputs, the input is sort of sensory in nature, and the output is motor.  </p>



<p>Henry    00:08:29    So the output is behavior and there is a causal relationship so that the inputs are somehow responsible for the outputs. And so the goal in neuroscience is simply to discover the function that will, um, that will link the inputs with the outputs. Okay. So the input will be the cause the output would be the effect. And, um, according to this paradigm, the, the brain or the nervous system is somehow respond responsible for a sensory motor transformation. Uh, it will, you know, compute various things. It would probably take many steps, but somehow, uh, the, the product is your behavior. And, um, so I argue in the chapter that this assumption is basically wrong, and that’s the reason that people have failed to explain behavior. It’s not because the brain is too complex.  </p>



<p>Paul    00:09:33    How did you get into control theory?  </p>



<p>Henry    00:09:37    Yeah, a good question. So I talked about control theory as an alternative explanation. So that is the, the model that I would use to explain behavior because there is only one class of systems in the universe that actually does not obey this kind of linear causation, um, um, model. So cause effect, explanations do not really apply when you have a closed loop, a negative feedback control system. And that’s why I talked about control theory,  </p>



<p>Paul    00:10:22    But it’s interesting. And we’ll, we’ll talk more about, um, the control theory approach. Uh, you do an analysis on the cybernetics of old Norbert Wiener and company, and, uh, describe what they got wrong. So, um, I mean, cybernetics, it seems like is having kind of a comeback, but, uh, I suppose you’re worried that it’s coming back and it’s still wrong. What, what, what was, uh, what’s the difference between that old cybernetics approach, um, of control theory? What did they get wrong? What were they missing that, uh, that you argue for?  </p>



<p>Henry    00:10:58    Well, I think, uh, let’s start with the basic control loop model, right? So the control, um, the basic control loop is quite simple. In fact, I think it’s simplicity is part of the problem because everybody assumes that they understand it when they don’t. Um, so the basic control has essentially three components. You have this input function, you have a comparison function, you have the output function. And, um, the comparison function will take the input and compare it with, uh, some reference input, reference signal, and generate, um, you know, the error signal, which is really the discrepancy, the difference between these two signals and then the signal is used to drive the output. And, um, if there negative feedback, then the output will have a certain effect on the input. And so that closes the loop. I think the problem with, um, cybernetics and with, you know, with Wiener’s model is he was actually under the influence of the linear causation paradigm.  </p>



<p>Henry    00:12:14    And, um, so his approach was very much the standard engineering control theory approach. Um, and the problem is not the, you know, with the, with the mathematics, with the equations of control, the problem is what I would call, um, a systems identification problem, where they are accustomed to treating, um, the reference input as the input to the controller. So, in other words, if you are a user of some servo system, then the, you know, let’s say you’re using, for example, a thermostat, then of course, you, as a user, you would set the temperature. And it seems like the setting, when you set the temperature, that’s the input to the system. And of course the output is whatever the AC will do to control the temperature. Now, this is quite misleading because the reference signal in a biological organism is actually inside the organism, right? Literally it’s inside of your brain. It’s not something that you can inject into the system as if you’re gone.  </p>



<p>Paul    00:13:33    Um, but we do generate those signals, those reference signals in a sense we autonomously generated.  </p>



<p>Henry    00:13:40    So the key is autonomy. Um, the key is that the reference signals are extremely important, um, in control systems and they must be generated, um, within the system. And that’s actually, uh, according to my model, at least that’s the job of the nervous system. So essentially what you have is a hierarchy of neural control systems, which can generate different reference signals at each level. And these reference signals are usually changing all the time, uh, with the exception of a few or relatively few homeostatic control systems, which are important for things like body temperature.  </p>



<p>Paul    00:14:33    Yeah. So I guess the big picture is that you conceive of the brain or brains in any species, I suppose, as a set of hierarchical, uh, control systems with, um, each level having its own reference signals. Right. I that’s straightforward to understand, um, for homeostatic, uh, mechanisms, like you mentioned, uh, our internal thermometer, right? Our, our, uh, our temperature reference signal. Um, but then you extend it to behavior, right? So you make the comparison between the classical, uh, neuroscience, which you model, which you’ve already described, where we see something happen. Uh, and then we have some sort of internal representation or model of that thing. Uh, and then we act on it. But in, in your scheme we have no internal model of what we’re, uh, acting on or, or do I have that wrong? Do, do you have room for a model, um, that’s generated through these hierarchical control processes or is it control processes all the way down or  </p>



<p>Henry    00:15:41    Great question? Um, I think first we have to be clear on what a model is just talking about representations, then yes, we have representations, we need representation. Um, but I think, uh, what you’re talking about, um, it’s very common in the field of motor control where they’re using all these models, um, which in my opinion are completely unnecessary and their model, their models are actually very detailed models of the external environment. Right. Right. And that’s actually due to a mistake in the analysis of the interaction between the system and the environment, um, which I mentioned in the chapter. So this is actually a direct consequence of the cybernetic model, uh, this need for just sort of computing the, the environmental properties. I think it’s imaginary. Um, and it doesn’t work very well in practice. So for example, in robotics, if you rely on this kind of inverse and forward computation, um, the computational challenge would quickly overwhelm you. And that’s why we still don’t have very good  </p>



<p>Paul    00:17:07    Robots, especially when the wind blows.  </p>



<p>Henry    00:17:11    Yes, exactly. Especially in any kind of unpredictable environment when the, when the disturbance cannot be, uh, you know, computed ahead of time.  </p>



<p>Paul    00:17:24    So we’ve talked about homeostatic mechanisms and, and you’ve talked about the motor domain. Um, do you think of, I guess my question is how much of, well, let me back up, it seems that there is room, and I know you said the brain is not too complicated, but it is pretty complex. And it seems like there’s room for a lot of different kinds of representations and a lot of different algorithms being run. And I’m wondering how much of the brain do you see as devoted to, uh, control systems?  </p>



<p>Henry    00:17:59    Well, first the principle is that in a control system, uh, what is controlled is the input and not the output. And if you accept that, you have to understand that, um, basically you can only control what you’re able to sense. So that means to the extent that, you know, you, you need to control some complex variable, you must have a fairly good representation of it. And so that means, uh, uh, the input function. So I, I mentioned earlier that you have three components. I would say that the input function is by far the most complex part of a control system in the brain. Um, the comparison function is relatively trivial because you’re just doing the subtraction.  </p>



<p>Paul    00:18:52    Yeah. You found that there’s a linear relationship,  </p>



<p>Henry    00:18:55    Right. And, uh, and the output function, there could be some complexity, but for the most part, it would probably involve some integration or maybe differentiation or a combination. Um, but the input function is tricky because you do there, you do need to represent, uh, whatever variable that, that you’re trying to control. A simplest example of course, is temperature. And that’s not a big deal, but of course, if you trying to represent, if you’re trying to control something more complex, for example, you know, if you’re trying to follow someone that you have to represent this person somehow, right. Um, and that’s not as easy, um, because you know, the sensory representations and the higher level object invariant representations are needed. I think essentially then you just do perform control on that variable.  </p>



<p>Paul    00:19:57    And then, so as that person ducks around the corner, you, uh, actively try to, um, I guess your reference signal would be to have them in zero degree line of sight right. Directly in front of you, let’s say, and when they turn a corner, uh, your brain’s job, uh, as a control system then is to move your body so that it returns it to a direct line of sight.  </p>



<p>Henry    00:20:20    Exactly. And, and, and then you, you would have problems if, for example, there’s, um, something that’s blocking your view. Right. You might need some memory.  </p>



<p>Paul    00:20:33    Yeah. Right. Well, yeah. So first of all, watch out people, uh, if, if Henry’s following you, um, it’s really just a scientific experiment. It’s, don’t be creeped out, but so you just mentioned memory. And what about the case where, uh, and, you know, I don’t want to, uh, Badger you, uh, all day with this kind of questioning, but, um, you know, what about the case where you’re not actually following the person, but you have to imagine where they might be going based on, on their history, right. Or, you know, they love ice cream. So you imagine they’re going to the ice cream shop. Um, but, and you have to sort of, then you have to, do you not have to then represent, let’s say the path to the ice cream shop, right. To, uh, to then close in on that person. Is that still a control problem? Do you see that as a control problem? That that is just a hierarchical version of it?  </p>



<p>Henry    00:21:25    I think what you’re suggesting is that can you predict where they they’re going to be the future and then act accordingly? Of course it’s a control problem, but I think that the difference is that, you know, the, you don’t, you’re not getting direct sensory input. Um, but instead you’re trying to predict based on your experience and learning, um, where they’re going to be, uh, that’s a slightly different problem, but in principle, the control problem is still the same, but the prediction problem. Yeah. I mean, I think the prediction problem, it’s not excluded from the model. I would say that, you know, in the control hierarchy, you do have so-called imagination, which is basically when a control loop is able to send its output to it as to its own, um, input function without going through the world really environment. So that’s called imagination. Um, and then, you know, for these higher functions, you do need memory, as I said, so I don’t, I don’t, I think these can be viewed as additional functions that you add to the control inward to help you control. But to be honest, I’m currently, I’m not concerned with these questions because I think they are sort of advanced, they’re not necessarily difficult, but I think it’s more important to understand the basic function of the nervous system, which I argue is to control various inputs. That’s my perspective.  </p>



<p>Paul    00:23:10    Yeah. So this is in some sense, this is a unified, uh, grand theory of the brain, I suppose.  </p>



<p>Henry    00:23:16    Uh, yes, indeed.  </p>



<p>Paul    00:23:18    Okay. All right. Great. So, uh, you mentioned the word, um, prediction, and you were, you were talking about that for a moment there. So I want to go ahead and I’ll interrupt us and, um, play you a question from a listener actually. Uh, this is the person who recommended you come on the podcast. So then you can answer the question and then we’ll get back on track here.  </p>



<p>Henry    00:23:39    Okay.  </p>



<p>Speaker 3    00:23:41    Hi, Dr. Yan, my name is Jeffrey Short and I’m a mechanical engineer. Who’s just started the field of neuroscience. I really appreciate the thought provoking perspective. You shared about chapter as I try to get oriented to the field. My question is around the potential role of prediction in hierarchical control system model, the writing, as I’m sure, you know, there are other models involving minimization of error resulting from comparison of top-down and bottom-up signals. Many of the other ones I’ve seen so far though, emphasize prediction. For example, Paul recently had a Neal on the podcast. We spoke about a predictive control based model. I didn’t see any mention of prediction in the model you put forward to the chapter though. So can you comment on why you favor a model that doesn’t emphasize prediction? And if there are any experiments that could be, or have been done to lend credence either. Thank you.  </p>



<p>Paul    00:24:25    Do you feel like taking a stab at that?  </p>



<p>Henry    00:24:28    Yeah, I suppose I can do that. So, yeah, as I said, I’m not against prediction. I think there’s a role for prediction in this type of model, but what people often call prediction is actually not prediction. So at least it’s not achieved by predicting the future. What people are usually talking about is can be achieved at least, um, by controlling a different set of variables. So in other words, what through learning what you’re trying to control, uh, changes. Okay. So you’re trying to control another aspect of the environment that of course is maybe causally related to the, to the variable that, um, initially you were trying to control. And the classic example is Pavlovian conditioning where you have, you know, uh, let’s say a bell and, um, food, right. Um, meat powder. And there, I think what’s happening is that you are reorganizing the input function of the control system, so that you’re no longer trying to control for the impact of the food on your, you know, in your mouth, um, dry meat powder. So normally you have to salivate, um, and instead, uh, you, you know, the input function now is incorporating the auditory input. And so whenever the auditory input in the neutral stimulus is presented, now you’re turning on this kind of, um, meat powder, controlling system.  </p>



<p>Henry    00:26:14    So that’s a very good example of prediction and people traditionally have, um, viewed Pavlovian conditioning as sort of a simple example of prediction, right? And there, there are a lot of models that tend to, uh, explain Pavlovian conditioning. Uh, but I think according to my analysis, it’s really representing an attempt to control a different aspect of the environment. Um, so I’m not against prediction, but I think, um, there is a very important alternative that people have not really thought about, which is, um, just online control of a different set of sensory variables that are actually predictive. You know, so that’s, for example, when you see a dark cloud, then you turn on all your avoidance, you know, control systems in order to avoid the rain. Right. Um, but that’s after learning the causal relationship, the predictive relationship between the cloud and the rain.  </p>



<p>Paul    00:27:21    So in essence, um, you spend your life learning a large part of learning is generating new reference signals or, and, or adjusting your reference signals. Do I read you correctly?  </p>



<p>Henry    00:27:35    Yes, that’s correct.  </p>



<p>Paul    00:27:37    So in the, in the chapter, you talk a little bit about how we can, uh, move forward, uh, without giving, you know, a full blown, uh, research program for instance, but, and I know that, you know, in your own research that you’re using these principles and applying them to study behavior. So I’m wondering if you could just summarize what you think the way forward, uh, is,  </p>



<p>Henry    00:28:02    Well, you ask very difficult questions. So my vision for the future of neuroscience, in other words,  </p>



<p>Paul    00:28:12    Well, because you outlined like first steps in the chapter, right? And, and some principles that you could follow and, you know, essentially you give three, three steps of what we’ll need to be looking for and how to test for control variables, right?  </p>



<p>Henry    00:28:27    Yes. I think to begin with, we have to first identify the control variables and, um, I would start with very basic variables that, um, are not learned or perhaps don’t require too much learning because they’re easier to study. Um, and then you have to apply the test for the control variable in order to study those. And of course, then you would have to discover the different components of the control system and how they’re implemented by the brain. Yeah. So the tests for the control variable is simply a test that is mandatory when you’re analyzing biological control systems and you first have to come up with a hypothesis about what the control variable might be. And because we know that, um, the output of a controller will systematically mirror the environmental disturbance. So once you know, what the, um, control variable might be, then you can introduce disturbance to that variable so that, um, you would see if there’s any resistance from the control system. And, uh, if you’re correct, then of course you would have compensatory, um, outputs that will resist the effect of the disturbance. So you basically, you’re, you’re applying disturbances that would affect the, uh, control variable as if it were not, um, if it were not under control. And if you’re experiencing some sort of compensatory output, then that is probably the right control variable. If not, you have to start over and repeat this whole process. So you have to come up with a different, uh, hypothesis about the control variable.  </p>



<p>Paul    00:30:28    So, uh, yeah, so initial steps toward, uh, a whole new neuroscience, but one of the things that you write, uh, in the chapter toward the end is if the above analysis is correct then a disturbingly large proportion of work on the neural substrates of behavior will have to be discarded. So is the chapter being well-read and if so, what kind of feedback are you getting from the neuroscience community and or other communities? Oh, no.  </p>



<p>Henry    00:31:03    First of all, I don’t think many have read the chapter, but obviously you have, and, um, you know, I get this uncomfortable feeling that maybe after this more people will read it, um,  </p>



<p>Paul    00:31:17    Likely so, but you, you wrote it, so it can’t be that uncomfortable. Right,  </p>



<p>Henry    00:31:22    Right. But you know, these book chapters, they’re not usually read by a lot of people. Okay. And, uh, so, so far I haven’t received much feedback, um, from other neuroscientists, at least I’m not sure if most of my colleagues, even though that I wrote this. So, uh, it’s hard to anticipate what people might say. I don’t know. I mean, what is your reaction? You were once upon the time you were a neuroscientist.  </p>



<p>Paul    00:31:52    Right. And, um, w we could use some of my own work as, uh, needing to be discarded, uh, for example, under this, uh, um, proposed paradigm. Um, so I have multiple thoughts. I mean, so that’s, that’s why I asked you how much of the brain do you think is devoted to this control aspect? Because it’s hard to reconcile, for example, what I consider my own rich subjective experience. Right. My thoughts and my, uh, my imagination. It’s hard to reconcile that with a control, uh, system approach. So it’s, it seems like there needs to be, and I don’t know how you get from a hierarchical control system to what I consider my fairly rich subjective experience. Do you, do you see a path forward through that? And you know, that, that’s just one example. There are, of course other examples, like, you know, different areas of the brain being devoted to different cognitive functions, et cetera. But, but to you, these are all in the service of control.  </p>



<p>Henry    00:32:58    Right. Um, I’m not sure exactly which aspects of your subjective experience is  </p>



<p>Paul    00:33:06    Well, like right now I can  </p>



<p>Henry    00:33:08    Control, I, I’m sure you’re talking about a lot of sensory experience. Like you, you see that desk over there, you’re not actively trying to control it. Right. Um, but I will say that, well, yeah, I mean, this seems like your sensory system can provide you with a lot of options and each of those perceptions might in principle be controllable. Um, so remember the principle is that you can only control what you can perceive. So w you know, of all the perceptions that you have right now, it could be a very rich, subjective experience. Um, I’m not you, of course. So I don’t know for sure. Um, and, uh, some of them could be controlled and, um, and of course we can demonstrate that. So the question is really what happens when you try to control one of these perceptions. So, for example, if I don’t like that desk, it’s offensive, I can turn around or I can walk out, or, you know, or if the temperature in the room is too low, if you’re cold, you can leave, or you can put on the sweater, and these are all behaviors that, um, I think are generated by control systems.  </p>



<p>Henry    00:34:30    Um, but I’m not sure if, um, the richness of your subjective experience per se is incompatible with the control hierarchy. Anyway.  </p>



<p>Paul    00:34:42    W what about, um, let’s say let’s okay. So I know that these words are fraught, but, um, the concept of mental representation, right. So I can close my eyes and we talked about imagination, uh, earlier I can close my eyes and I can imagine my future house, a giant mansion on a hill, um, you know, in Costa Rica or something like that. So it’s that kind of subjective experience as well, just being, um, it, it, it feels like, uh, I have a rich representation of not only, you know, my immediate perceptual experience, but of possibilities, and I can, you know, uh, memories, um, et cetera, those feel like they are mental representations. And, you know, the concept of representation I know is, uh, uh, philosophically tricky as well.  </p>



<p>Henry    00:35:33    Actually, I don’t think representations are tricky any way. Um, uh, I think they’re just literally true because you have signals in your brain that represent things, uh, including this big mansion. You know, for example, if that’s a real goal that you have, and the, the you’re working very hard, you’re interviewing all these people, and let’s say your podcast becomes the most popular show. And then of course you can reach that goal, right. If, if you were actually doing this for, for that, for that house now. Um, so in that sense, yeah, I mean, I think goals, especially in humans could be relatively, um, abstract and fancy. Um, but that in itself, I think is sort of independent of whether you, you can exert control over it. In fact, I think, you know, some goals, obviously you do try to control, and that’s the definition of goal directed behavior, but the behavior is just a control process. And we say that because you always control you go, you always comparing, you know, your ongoing inputs with your desire state. So let’s say you imagine that you’re, you know, there’s this nice house that you like, but your current house is too small and that’s something that you’re working towards. So that’s what I mean by a control process. Um, you know, whether you can imagine something 40 years from now, or have fantasies about, you know, anything in the world, I don’t think that’s so relevant because that in itself does not falsify any control model.  </p>



<p>Paul    00:37:20    I guess my recurring question is just, you know, how much to think about, um, the brain’s functioning as being devoted to control processes.  </p>



<p>Henry    00:37:30    As I said, I think the input function is the most complex part and all these rich representations that you mentioned are really part of the input function. Even when you imagine things, uh, you using the perceptual channels, they’re just vague sort of perceptions. And the, the source is not coming is not in the external world, but coming from your own brain. Right? So that’s the major difference. And that’s why imagination imaginary inputs and actual perceptual inputs will compete because they use the same perceptual channels at the higher levels. So that’s why, when you’re daydreaming, you can’t perceive what’s in front of you. Um, so I think that actually supports the idea that, um, um, you know, even imaginary, um, imaginations can be used as some sort of input to a higher level of control system.  </p>



<p>Paul    00:38:29    Uh, I actually, I buy that, um, before we move on the word, you, you drop the word teleology, uh, in, uh, the chapter. And I believe you’ve used it in the past as well. And actually my last guest, um, Yohanis Yeager talks a lot about how we need to, um, and a lot of other people seem to be, uh, talking about this, although this is my bias, I suppose, as my own interests have taken me down a path that is, that is, uh, uh, crowded with, uh, T um, teleology, um, advocates. Um, but can you talk a little bit about why, uh, we need, uh, to reinvigorate the notion of teleology and accepted as a valid, uh, scientific concept?  </p>



<p>Henry    00:39:17    Yes. I think teleology simply means goal-directed, uh, so of course there’s a long history of teleology, but, you know, telos is basically the goal or end state. So that has always been a dominant concept used to explain behavior, but I think something happened, you know, after Galileo and Newton. So in physics, um, modern scientific revolution, right? The first scientific revolution, the findings of Galileo and others, um, appear to falsify this notion of teleology because the Newtonian physical laws do not contain any element of the final cause. So final cause is that for the sake of which, um, so for example, you know, Aristotle’s example was, um, I’m running in order to become healthy, right? And so, uh, what follows in order, um, in other words, the state of being healthy is the goal or telos. And, um, so that’s the purpose of your behavior and your behavior is explained, um, by this purpose now, according to modern physics, that can’t possibly be true because, um, again, as I mentioned, if there’s linear causation, so, uh, F equals ma, there’s no final cause there.  </p>



<p>Henry    00:40:56    So it seems like the, everything in the universe can be explained by these simple physical laws, and you don’t need  to explain behavior, and therefore people will have reached a conclusion that you have to abandon teleology. In fact, the whole history of modern psychology and neuroscience is the history of, uh, various attempts to, uh, abandoned teleology, both in the vocabulary and also in the, you know, the mechanistic explanations. Um, and so in my chapter, I argue that this is simply wrong. This is a huge mistake because, um, it’s very simple, it’s that teleology is the main property of control system. So you have everything in, you know, in the, in the universe follows, basically follows Newtonian laws, okay. With some minor exceptions. But the problem is that there is this class of systems called feedback control systems, which are sort of the exception in that you have to use circular causation to describe their properties because, um, um, at the same time that the input is affecting the output, the output is also affecting the input actually.  </p>



<p>Henry    00:42:21    And, uh, and the way that the output is affecting the input is quite different. Um, but there’s certainly simultaneous. And because you have these two equations, um, things are changing simultaneously. You can’t use linear causation to describe the, um, properties of this type of system. So that’s the exception. So basically that means that in physics you study, you know, uh, open loop or things with no feedback, if you will. Um, and, and in biology, everything has feedback, everything is teleological. Um, so I would say that, yeah, in that sense, Aristotle was right. There is final cause, as long as you’re talking about control systems. Of course, he didn’t know how it worked. Right. So that’s the distinction because the way that it was used by people like Aquinas and a lot of people in the history was to S to, to argue that, okay, this is sort of a religious argument, right?  </p>



<p>Henry    00:43:30    So this is the basis for, um, how God is all knowing and, uh, knows the purpose for everything on earth. And the reason that the rock is falling is because the God, you know, God intended the rock to fall, right? So that’s sort of another type of misunderstanding in my opinion. Um, and, and that’s also why there is this conflict between the so-called scientists and people who believe in teleology, um, because teleology is considered to be unscientific, right. Um, so I’m not advocating teleology per se. I’m just saying that, uh, the properties of teleological systems are basically the properties of controls systems. Um, and, uh, if you, if you think that the nervous system, uh, is a control hierarchy, then obviously you have to agree that it’s teleological because, um, yeah, but it’s literally true because the way these things run is that you need this internal state, that this internal reference signal to be there first, before you can generate the right behavior to reach the desired state.  </p>



<p>Paul    00:44:49    Okay. So, um, the telos is not the reference signal per se, but it is the, uh, end result of controlling for the reference signal. Is that fair to say?  </p>



<p>Henry    00:45:00    I think before you understand, um, control systems, you always get confused about the consequence and the, and the purpose as if the sort of the same thing, but of course there are not because one is just the signal inside of your brain and you, you might fail you being, it’s not like you’re guaranteed to succeed in your attempt to control. Right. Um, and of course, this also explains the difference between accidental and intentional behavior and all that. Um, so I think, yeah, traditionally people get very confused about the concept of purpose, of consequence of goal, but once you understand control systems, it’s not a big deal. It’s very straightforward. So anyway, that’s just my take, I know that a lot of philosophers will have a problem with this.  </p>



<p>Paul    00:45:53    Yeah. Oh, all right. Great. I liked the attitude, um, before I, I, you know, I want to ask you about AI actually. Uh, but, um, one more thing on the neuroscience side, or at least one more thing we can talk about more, if you like, um, thinking about these circular causation in biological autonomous agents, uh, one of the things that you advocate is that actually what we need to do is instead of studying, you know, 40 different, uh, animals and averaging their behavior, or looking for effects through that, that what we need to do is it would be more fruitful to study one individual, but to do it for a long time and, uh, study it continuously in a continue on a continuous timeframe because of the circular causation, because, uh, the inputs are affecting the outputs and the outputs outputs are effecting the inputs because of the closed loop control circuit.  </p>



<p>Henry    00:46:47    Yeah. That’s a tough question. So traditionally, as you know, very well, uh, for example, a monkey work  </p>



<p>Paul    00:46:53    Two is the golden number in monkeys. Yeah.  </p>



<p>Henry    00:46:57    Yeah. So it’s funny because a lot of people, a lot of neuroscientists, they like to criticize a monkey research because the end is too low. Right. I hear that a lot. They use two animals or maybe three monkeys. And how can you believe the data? Because there are so few animals, um, I think that’s completely misguided because it’s not really the number of animals it’s actually in the way, it’s the amount of data that you collect. And the more importantly, the quality of the data. So in the traditional analysis, you’re basically doing some sort of input output analysis. You’re manipulating the input because the input is so-called, uh, independent variable. And then you have output behavior output, which is dependent variable. So you’re always testing the effect of variable X on measure, Y essentially. Right. Um, and, uh, so what you’re trying to identify is the function that will connect these two.  </p>



<p>Henry    00:48:01    So if I vary the amount of reward, what happens to the firing rate of the cell, right? That’s kind of, um, or if I manipulate the attentional demand of some task, what happens to the firing rate and that sort of research, and this is difficult. And, uh, yeah, if, if I, if what I suggest is correct, then all this work is, um, not worth your time. And the problem is of course that the, the, the variable that you’re manipulating is, um, not necessarily the input now, usually people are trying to identify some effective stimulus, but the effective stimulus, um, as traditionally defined as something that will reliably produce the behavior that you’d like to study. And then in reality, it’s actually not input from the perspective of the organism, but it’s the sort of the inferred input from the perspective of the scientist observer on the third person perspective.  </p>



<p>Henry    00:49:18    And that’s very dangerous because there is a, um, illusion, uh, what I called, what I think bill powers called, uh, the behavioral illusion, which is that. So basically if you treat the disturbance, which is the input from the, from the, from the eyes of the observer, as the input and the behavior as the output, and it looks like you have identified the organism function or the neural function, that’s expressing the behavior of outdoor neural output as a function of the, uh, input, right. But that’s the illusion, okay. This is not true. In reality, this function does not describe the property of the organism. It actually describes the environmental feedback function. It’s mirroring the environmental feedback function. So when the disturbance is considered the independent variable and the output, the dependent variable, um, the, this function that you discover is not the real input output function, whenever there is control, it actually reflects the inverse of the environmental feedback function. I know that that’s not very easy to understand, but basically, um, you, what you think is a property of the nervous system, if you use this approach is actually a property of the environment, right? So this is probably the most vicious.  </p>



<p>Henry    00:50:50    Yes. The vicious trap in the history of neuroscience.  </p>



<p>Paul    00:50:56    All right. So, um, anything else from the chapter? So, you know, we didn’t cover, you actually give a lot of examples from history. You talk about Sherrington, um, sharing yeah. Sherrington and, uh, his experiments and Adrian, and lots of people from the history as well. Um, give me examples of, um, how they, um, how some famous people got it wrong from this perspective. And, uh, there’s a lot more in the chapter. Did we miss anything that you think we should cover here? Or do you think you’ve, um, dug yourself a deep enough hole?  </p>



<p>Henry    00:51:32    Oh, uh, I think one of the things I suggested if I remember correctly, uh, for future research as this, um, the concept of using continuous measures. Right, right, right. I think you mentioned that. Um, and just, sorry, I have to use the monkey experiment again, you use the monkey example, as you know, you do chair training, the monkey is, is restrained. Um, and then usually only a limited set of behaviors are measured. Um, let’s say hand movements, are you pressing a button or moving a joystick or eye movements, saccades? Um, but the most important problem, the most important limitation is that the measures are discrete events. There are timestamps. And then what you do is you record your, a single neuron activity. You get the single units and you plot these peri-event histograms. Right. I’m sure you did this. I’m quite familiar.  </p>



<p>Paul    00:52:33    Yes.  </p>



<p>Henry    00:52:35    And so, so there’s this strange assumption that essentially the only thing that matters in behavior are these timestamps, these events, which are actually of course created by the scientist. It’s not, uh, I think a reflection of the actual behavior of the animal it’s, whatever the scientist, um, considers important or relevant in this particular behavioral task. And then what you’d look at is the neuroactivity before or after, or P you know, Perry, uh, this event, and then you reach conclusions based on various manipulations. Right. Um, so I think that is very problematic and this has nothing to do with, um, the theory control or, or anything like that. I’m just saying that this is a clear limitation of, um, the experimental approach that you’re not even attempting to measure behavior. Um, so I think that’s a big problem because, um, traditionally, uh, whenever you look at the relationship between neuroactivity and behavior, you use this kind of approach and your conclusion, I think, is going to be very limited because you’re not looking for, you know, you’re not measuring behavior continuously.  </p>



<p>Henry    00:54:03    You might be recording your activity continuously. So for example, in our work, one of the things that we found was that when we measured behavior, uh, behavioral variables continuously, for example, kinematics, um, and we actually allow the animal to move, then there is a remarkable linear relationship between the neuro activity between the firing rate and kinematics and this kind of correlation as much higher than anything ever reported in the history of neuroscience. So I think that in itself is a major discovery is the nature of this correlation, because it’s completely unknown. Um, you understand that for many decades, neuroscientists have been trying to find a relationship between your activity and behavior, right? And for the most part, they failed whenever they come up with a correlation or coding or in coding. So to speak, the relationship seems, um, quite, let’s say subtle. I mean, there is no clear relationship.  </p>



<p>Henry    00:55:11    The correlations are low. And in part, because of these failures, they have largely given up, right. But our results suggest that in fact, every time you measure behavior properly, there is a remarkable linear relationship between certain variables, behavioral variables, um, and the neuro activity. And this is not that surprising because behavior is continuous. Um, even though we might represent behavior, um, as discrete events at a very high level, for example, that might be, what’s what you’re consciously aware of. That’s not necessarily the case. Um, when you’re measuring the actual behavior generated, there is a duration, there’s a start, you know, it starts at some point, it takes some time and then it stops, right? So calling this a discrete event, I think is misleading. And at least our results show that you can get very interesting data. If you simply measure, if you attempt to measure the behavior. And I think once you get these, um, novel results, then you have to, um, explain them, right. So how do you explain the fact that you have neural activity that actually slightly precedes the kinematics that’s achieved by the body? And it’s basically a direct representation of something that hasn’t been achieved, but is, you know, with the short lag it is being achieved by your body. Right. So how is that possible? How do you achieve the desired positions if the signals are not literally the representation of the descending reference signals?  </p>



<p>Paul    00:57:07    So Henry, uh, by the way, I love, I love the chapter. Um, and I, you know, also recommended of course to everyone else. Um, can I ask you about what, how this relates to current artificial intelligence is? So on the one hand you have reinforcement learning, um, and in this sense, this is, um, and I know that you’ve made robots or a robot, um, using, and I’ll link to that paper as well, using this kind of control theory approach and the robot, uh, I know, is made of very cheap parts, but actually performs really well in this continuous manner. And it’s a system of hierarchical control processes. What I’m curious about is how this kind of approach could help inform, uh, artificial intelligence.  </p>



<p>Henry    00:57:52    It’s a great question, but it’s too big. Um, probably require a separate session, I guess the short answer is that, um, yeah, I don’t, I don’t think current AI is very useful. And so the main problem is actually the same problem that I talked about before. Um, so for example, reinforcement learning is just another example of the classic paradigm, uh, is an attempt to do, um, to explain teleology without using teleology. And so that’s why reinforcement of the concept of reinforcement is circular. So actually that’s sort of my background. I did learning theory, reinforcement learning. So, so we can talk about that in the future, maybe. So, so yeah, obviously there are limitations there. Um, I will say that, um, what people don’t realize is, um, how bad these systems are, like how bad current AI is, how bad, uh, reinforcement learning is. Uh, and that’s because they never think about the computational power and, you know, the energy environment and things like that.  </p>



<p>Henry    00:59:09    Um, so obviously there has been progress, so it was better than let’s say, um, you know, 20 years ago. Uh, but I think a lot of it, a lot of the, uh, the progress is just in computational power or if you basically used computers from 20 years ago, as you’re forced to use those computers run these, um, you know, the current AI, it just wouldn’t work. Um, and, but, you know, I, I don’t think for example, that the biological brain has a lot of computational power. It’s significant, but it’s not even close to what, you know, these digital computers can do. So I think in a way, the current approaches in AI and robotics are irrelevant. Um, but, but again, that’s why I, I don’t know if I’m comfortable talking about that. It’s a really big question. It’s complicated. And again, you know, I don’t want to offend everybody. Of course there are AI people who care about efficiency, but they just don’t have enough constraints.  </p>



<p>Paul    01:00:24    Do you know whether there are, uh, AI competitions that respect, um, power usage, power consumption, and sort of normalize for that, or where there, you know, like, um, not that you can aim to, uh, use a system that uses the same amount of power as the human brain, for instance, right. Or something like that. But there could be,  </p>



<p>Henry    01:00:47    I should, honestly, I think that you should try to do that. And if you have such a constraint, then you probably come up with smart, um, design. And so at least, you know, something in the ballpark, I would say. Yeah. Which is interesting because everybody cares about energy these days. Right. Um, but you know, AI is where they don’t care about electricity. So, so let me put it this way, uh, in terms of AI and robotics, when you can ask any expert in robotics, whether it would be trivial to build, let’s say a robot with more than 50 degrees of freedom, and I guarantee you that they will say that it’s extremely difficult. At least if not impossible, to my knowledge, nobody’s done it, but using our approach, it would be trivial. That’s the major difference. And it doesn’t even require much computational power. It doesn’t require anything that’s significantly different from, you know, what we used in the published paper. Um, but that’s, I can tell you, but you can ask an AI expert or a robotics expert or how difficult that would be. And, um, and I imagine they would say that it’s impossible.  </p>



<p>Paul    01:02:18    All right, Henry. Um, there’s been a lot of, uh, pessimism I’ll say, right, but I want to come back to, um, as, as a last moment, I want to come back to Kuhnian, revolutions and crises, right. Because on the one hand crisis, that sounds bad. Um, on the other hand, I hear a lot of this sort of talk in neuroscience for one reason or another. Yours is a specific, unique take actually. Um, um, which is, you know, why it’s so interesting, but it’s also a sign potentially of a healthy field, right? Because if people are turning inward and thinking, oh, we, we we’re doing this wrong. Um, because what happens after a crisis is the revolution and then a new paradigm. So what I’m wondering is whether you feel optimistic about the future, or if you feel like we’re going to be mired in this crisis, um, moving forward for, uh, another century or so  </p>



<p>Henry    01:03:17    I would say that overall I’m very optimistic. Uh, I think there is going to be a revolution. Um, that’s the short answer? Uh, on the other hand, I do think there are a lot of obstacles in part because a lot of people, uh, don’t think there’s a crisis, right? A lot of people that are also optimistic, but for the wrong reasons, they think the current paradigm is good. And now that we have all these new techniques in neuroscience, you just have to use these new techniques. You can generate big data. Um, obviously the brain is so complex, so we can map everything. We can map all the connections, all the synapses. We can record all the activity from every cell, that sort of thing. Uh, obviously as I mentioned in the chapter, I think that’s a misguided approach. Uh, you never make any progress in science that way, you know, for the same reason, Galileo did not measure every object in the universe.  </p>



<p>Henry    01:04:19    He didn’t drop every stone in the world and measure how long it took them to fall. Right. So I don’t, I don’t think that’s the right approach, but I think it’s healthy in the sense that, okay, so there are people who are perhaps mainstream and they believe that the paradigm is healthy. Uh, so they want to stick to, you know, they, they, they want to maintain the status quo. Um, and there are, um, others, you know, people like me, you know, perhaps in the minority, but, um, we think there’s a crisis. Um, uh, we would like to start a revolution. So I think that’s healthy because there could be competition so we can see who will get there first. So yeah, I think it’s, uh, I think it’s exciting and I’m quite optimistic.  </p>



<p>Paul    01:05:09    Oh, that’s okay. That’s a great place to end it. Henry. Thank you for coming on the show and thanks for your, uh, thoughtful, uh, work.  </p>



<p>Speaker 0    01:05:17    Okay. Thank you. Thanks for having me.  </p>

</div></div>


<p>0:00 – Intro<br />5:40 – Kuhnian crises<br />9:32 – Control theory and cybernetics<br />17:23 – How much of brain is control system?<br />20:33 – Higher order control representation<br />23:18 – Prediction and control theory<br />27:36 – The way forward<br />31:52 – Compatibility with mental representation<br />38:29 – Teleology<br />45:53 – The right number of subjects<br />51:30 – Continuous measurement<br />57:06 – Artificial intelligence and control theory</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/667a8bc2-f262-4f2d-ba05-b0bfe28b79aa-119-Henry-Yin.mp3" length="64235208"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Support the show to get full episodes and join the Discord community.











Henry and I discuss why he thinks neuroscience is in a crisis (in the Thomas Kuhn sense of scientific paradigms, crises, and revolutions). Henry thinks our current concept of the brain as an input-output device, with cognition in the middle, is mistaken. He points to the failure of neuroscience to successfully explain behavior despite decades of research. Instead, Henry proposes the brain is one big hierarchical set of control loops, trying to control their output with respect to internally generated reference signals. He was inspired by control theory, but points out that most control theory for biology is flawed by not recognizing that the reference signals are internally generated. Instead, most control theory approaches, and neuroscience research in general, assume the reference signals are what gets externally supplied… by the experimenter.



Yin lab at Duke.Twitter: @HenryYin19.Related papersThe Crisis in Neuroscience.Restoring Purpose in Behavior.Achieving natural behavior in a robot using neurally inspired hierarchical perceptual control.


Transcript

Henry    00:00:03    So how come neuroscientists have failed so badly at explaining behavior? So obviously they’d been trying to do this for at least a century and a lot of smart people have worked very hard, but the result is I would say very disappointing. I think it’s healthy in the sense that, okay, so there are people who are perhaps mainstream and they believe that the paradigm is healthy. You know, they, they, they want to maintain the status quo, others, you know, people like me perhaps in the minority, but, um, we think there’s a crisis. Um, uh, we would like to start a revolution. Uh, I think it’s exciting and I’m quite optimistic.  



Speaker 0    00:00:54    This is brain inspired.  



Paul    00:01:08    Hello, it’s Paul. And today I bring you Henry Yin. Henry runs his lab at Duke University where he studies learning and behavior in rodents using techniques like optogenetics and electrophysiology. But that’s not why he’s on the podcast today. He’s on the podcast because he’s written a few pieces in which he argues that we need a new paradigm in neuroscience to explain behavior that essentially we are barking up the wrong tree, trying to study the brain like an input output device, which creates representations of objects in the world, and then programs the body to act accordingly. Instead, Henry looks to control theory and suggests that the brain...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:06:36</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 118 Johannes Jäger: Beyond Networks]]>
                </title>
                <pubDate>Mon, 01 Nov 2021 16:59:37 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-118-johannes-jager-beyond-networks</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-118-johannes-jager-beyond-networks</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/10/art-118-01.jpg" alt="" class="wp-image-1468" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/10/head.jpg" alt="" class="wp-image-1469" width="250" height="250" /></div>



<p>Johannes (Yogi) is a freelance philosopher, researcher &amp; educator. We discuss many of the topics in his online course, <a href="https://www.youtube.com/playlist?list=PL8vh-kVsYPqOKJOboONJIQBd8ds0ueM_W">Beyond Networks: The Evolution of Living Systems</a>. The course is focused on the role of agency in evolution, but it covers a vast range of topics: process vs. substance metaphysics, causality, mechanistic dynamic explanation, teleology, the important role of development mediating genotypes, phenotypes, and evolution, what makes biological organisms unique, the history of evolutionary theory, scientific perspectivism, and a view toward the necessity of including agency in evolutionary theory. I highly recommend taking his course. We also discuss the role of agency in artificial intelligence, how neuroscience and evolutionary theory are undergoing parallel re-evaluations, and Yogi answers a guest question from <a href="https://braininspired.co/podcast/111/">Kevin Mitchell</a>.</p>



<ul><li>Yogi’s website and blog: <a href="https://www.johannesjaeger.eu/">Untethered in the Platonic Realm</a>.</li><li>Twitter: <a href="https://twitter.com/yoginho">@yoginho</a>.</li><li>His youtube course: <a href="https://www.youtube.com/playlist?list=PL8vh-kVsYPqOKJOboONJIQBd8ds0ueM_W">Beyond Networks: The Evolution of Living Systems</a>.</li><li>Kevin Mitchell’s previous episode: <a href="https://braininspired.co/podcast/111/">BI 111 Kevin Mitchell and Erik Hoel: Agency, Emergence, Consciousness</a>.</li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Yogi    00:00:03    They’re presented as an explanation of what’s going on, why other than they don’t really explain anything right. That, that was one of my problems. Um, they just showed that the system is complicated. Basically. I wouldn’t even call it complex. And so, um, I became frustrated with this is you’re really a process thinker, and I think that’s really important here. You need to get, let go of those, those fixed structures. I mean, we, we can only study small aspects of development and evolution using dynamical systems theory, but we cannot capture the agency of the organism so successful that we’ve just completely forgotten all the other stuff that we’ve thrown out to make it work in the first place. And it’s time to get back to that because a lot of the problems we have right now are in understanding our situation in the world and then understanding truly complex systems that have agents in them. And of course, neurosciences are completely included in that  </p>



<p>Speaker 0    00:01:09    This is brain inspired.  </p>



<p>Paul    00:01:22    Hello, it’s Paul. On the episode today, I have a chat with Yohanis Yeager who also goes by Yogi, which is what I call him during the episode on his website, Yogi bills himself as a freelance philosopher, a researcher and an educator. And he’s actually done a lot of empirical research in systems science and evolutionary biology and a range of interdisciplinary topics as well. The reason he’s on the podcast is because I recently took his online YouTube course called beyond networks, the evolution of living systems. So the course covers a lot of ground, uh, but it’s roughly about how, because of the complexity...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Johannes (Yogi) is a freelance philosopher, researcher & educator. We discuss many of the topics in his online course, Beyond Networks: The Evolution of Living Systems. The course is focused on the role of agency in evolution, but it covers a vast range of topics: process vs. substance metaphysics, causality, mechanistic dynamic explanation, teleology, the important role of development mediating genotypes, phenotypes, and evolution, what makes biological organisms unique, the history of evolutionary theory, scientific perspectivism, and a view toward the necessity of including agency in evolutionary theory. I highly recommend taking his course. We also discuss the role of agency in artificial intelligence, how neuroscience and evolutionary theory are undergoing parallel re-evaluations, and Yogi answers a guest question from Kevin Mitchell.



Yogi’s website and blog: Untethered in the Platonic Realm.Twitter: @yoginho.His youtube course: Beyond Networks: The Evolution of Living Systems.Kevin Mitchell’s previous episode: BI 111 Kevin Mitchell and Erik Hoel: Agency, Emergence, Consciousness.


Transcript

Yogi    00:00:03    They’re presented as an explanation of what’s going on, why other than they don’t really explain anything right. That, that was one of my problems. Um, they just showed that the system is complicated. Basically. I wouldn’t even call it complex. And so, um, I became frustrated with this is you’re really a process thinker, and I think that’s really important here. You need to get, let go of those, those fixed structures. I mean, we, we can only study small aspects of development and evolution using dynamical systems theory, but we cannot capture the agency of the organism so successful that we’ve just completely forgotten all the other stuff that we’ve thrown out to make it work in the first place. And it’s time to get back to that because a lot of the problems we have right now are in understanding our situation in the world and then understanding truly complex systems that have agents in them. And of course, neurosciences are completely included in that  



Speaker 0    00:01:09    This is brain inspired.  



Paul    00:01:22    Hello, it’s Paul. On the episode today, I have a chat with Yohanis Yeager who also goes by Yogi, which is what I call him during the episode on his website, Yogi bills himself as a freelance philosopher, a researcher and an educator. And he’s actually done a lot of empirical research in systems science and evolutionary biology and a range of interdisciplinary topics as well. The reason he’s on the podcast is because I recently took his online YouTube course called beyond networks, the evolution of living systems. So the course covers a lot of ground, uh, but it’s roughly about how, because of the complexity...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 118 Johannes Jäger: Beyond Networks]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/10/art-118-01.jpg" alt="" class="wp-image-1468" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/10/head.jpg" alt="" class="wp-image-1469" width="250" height="250" /></div>



<p>Johannes (Yogi) is a freelance philosopher, researcher &amp; educator. We discuss many of the topics in his online course, <a href="https://www.youtube.com/playlist?list=PL8vh-kVsYPqOKJOboONJIQBd8ds0ueM_W">Beyond Networks: The Evolution of Living Systems</a>. The course is focused on the role of agency in evolution, but it covers a vast range of topics: process vs. substance metaphysics, causality, mechanistic dynamic explanation, teleology, the important role of development mediating genotypes, phenotypes, and evolution, what makes biological organisms unique, the history of evolutionary theory, scientific perspectivism, and a view toward the necessity of including agency in evolutionary theory. I highly recommend taking his course. We also discuss the role of agency in artificial intelligence, how neuroscience and evolutionary theory are undergoing parallel re-evaluations, and Yogi answers a guest question from <a href="https://braininspired.co/podcast/111/">Kevin Mitchell</a>.</p>



<ul><li>Yogi’s website and blog: <a href="https://www.johannesjaeger.eu/">Untethered in the Platonic Realm</a>.</li><li>Twitter: <a href="https://twitter.com/yoginho">@yoginho</a>.</li><li>His youtube course: <a href="https://www.youtube.com/playlist?list=PL8vh-kVsYPqOKJOboONJIQBd8ds0ueM_W">Beyond Networks: The Evolution of Living Systems</a>.</li><li>Kevin Mitchell’s previous episode: <a href="https://braininspired.co/podcast/111/">BI 111 Kevin Mitchell and Erik Hoel: Agency, Emergence, Consciousness</a>.</li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Yogi    00:00:03    They’re presented as an explanation of what’s going on, why other than they don’t really explain anything right. That, that was one of my problems. Um, they just showed that the system is complicated. Basically. I wouldn’t even call it complex. And so, um, I became frustrated with this is you’re really a process thinker, and I think that’s really important here. You need to get, let go of those, those fixed structures. I mean, we, we can only study small aspects of development and evolution using dynamical systems theory, but we cannot capture the agency of the organism so successful that we’ve just completely forgotten all the other stuff that we’ve thrown out to make it work in the first place. And it’s time to get back to that because a lot of the problems we have right now are in understanding our situation in the world and then understanding truly complex systems that have agents in them. And of course, neurosciences are completely included in that  </p>



<p>Speaker 0    00:01:09    This is brain inspired.  </p>



<p>Paul    00:01:22    Hello, it’s Paul. On the episode today, I have a chat with Yohanis Yeager who also goes by Yogi, which is what I call him during the episode on his website, Yogi bills himself as a freelance philosopher, a researcher and an educator. And he’s actually done a lot of empirical research in systems science and evolutionary biology and a range of interdisciplinary topics as well. The reason he’s on the podcast is because I recently took his online YouTube course called beyond networks, the evolution of living systems. So the course covers a lot of ground, uh, but it’s roughly about how, because of the complexity of us as biological organisms functioning in a highly interactive and complex environment, we need to rethink evolutionary theory. And Yogi makes an argument that we need to add a new perspective to evolutionary theory that accommodates a role for agency as biological organisms.  </p>



<p>Paul    00:02:24    And the course has the title beyond networks, because within this agential perspective, we need to somehow move beyond the dynamical and mechanistic explanations that we currently use to study things like gene regulatory systems, which are traditionally thought of as networks of interacting genes and products of those genes and so on. So I wanted to have them on, because first of all, I really enjoyed his course as you’ll hear. Uh, but also because his argument applies equally well to explaining brains, which are in the same complexity realm as organisms, obviously. And given that on this podcast, we often talk about using networks like deep learning networks to, uh, explain intelligence. I think that yogis is an important message to consider. So I highly recommend the course note, the term highly recommend because fair warning. If you do watch the videos, your reading list will exponentially increase with all the books and papers that he quote unquote highly recommends throughout.  </p>



<p>Paul    00:03:25    We also have a guest question from Kevin Mitchell today who was on the podcast recently on episode 111. I linked to Yogi’s blog and website and to the course that we discuss in the show notes at braininspired.co/podcast/118. If you find the podcast valuable, consider supporting it on Patreon, we just had our first zoom presentation and discussion group through the discord server that I run for Patreon supporters. This one was about the landscape of cognitive science and it was a lot of fun. So I look forward to having more of those in the future to support the show, just click the Patreon button at braininspired.co, all right. It was a pleasure having Yogi on, and I hope that you enjoy the discussion as much as I did. I came on to you. Um, and in fact, uh, we’re going to talk about your online course on YouTube called beyond networks, the evolution of living systems.  </p>



<p>Paul    00:04:20    And I I’d like to say I came onto you through academic means, but I think YouTube figured out that I was looking for biological autonomy topics because I had read Alvaro Moreno and Matteo Mossio’s book on biological autonomy. And I was either searching for it or YouTube knew that I wanted to search for it. And then that’s how I came across your course, which, um, I just want to say is I, I really love this course and, um, I’ll probably recommend it in the introduction, but I, I just want to reiterate that I recommend it to all my listeners to check this course out. But before we talk about that, I would love for you so that I don’t botch it to introduce yourself and talk a little bit about your, your background and, and the empirical research that you’ve done. Uh, and then how you’ve sort of transitioned and your trajectory to your current thinking.  </p>



<p>Yogi    00:05:13    Well, thank you very much. First of all, um, it’s, it’s really nice to hear that the lectures are sort of reaching beyond their initially intended target audience, which it was sort of accidental. That’s really nice to hear, um, uh, evolutionary biologists systems biologists I had for years been a researcher in the lab myself, and then have an empirical was the head of an empirical lab at the Centre for Genomic Regulation in Spain. And I was looking at the evolution of gene regulatory networks that are involved in the early development of, um, especially fly, stick insects. But the aim was to sort of learn general principles of network evolution. And I was using dynamical systems theory for, uh, my work. And, uh, I guess it’s, I’ve always been a bit of a philosopher. So I was reading philosophy as a high school student. I was interested in the philosophy of science while I was a student.  </p>



<p>Yogi    00:06:15    Um, and I read beyond the classes that I took about the philosophy of science, but it was at that time when I was a sort of a, a PhD student still really, I noticed that we had a really hard time publishing our work at the time. The field was very hostile to the sort of modeling studies. And I also realized that that reviewer students sort of criticized the methods that we were using, but they didn’t get the questions that we were asking. And so I took this step back and I was wondering about what kind of questions do scientists. This set me on a trajectory that got me into becoming the director of a small Institute for the philosophy of science, which is called just outside Vienna a few years back. I didn’t stay there for very long for various reasons, but since then I’ve continued on this philosophical trajectory.  </p>



<p>Yogi    00:07:10    And during my time at that Institute, I could make some really fantastic connections. Scientists don’t usually get in touch with philosophers of science, but I had all these people that I was working with. And there’s some really good people out there that know a lot about not just the science that we’re doing, but also how we do it. And it’s a pleasure to be working with several of them now in collaboration. So my work has taken that philosophical turn, but I’m still doing biology. I would call it philosophical biology. It’s a type of theoretical biology that I would put famous people like Conrad Hal Waddington into, that has been on the back burner for the last 50 years or so. And I think it’s high time to revive it. Can I ask,  </p>



<p>Paul    00:07:53    So you said that you were studying or reading philosophy in high school and interested in it. And I did too, but I really didn’t understand looking back. I really didn’t understand it. I didn’t have the same grasp on it that I believe I do now. Of course, that’s probably not true also. But do you feel that same way or did, did you get it back then?  </p>



<p>Yogi    00:08:12    No, absolutely not. You also read the, you know, of course I still like that book, but as you said, the context matters a lot and, uh, I am definitely reading very different things right now. Uh, this was not a sort of a planned trajectory. I meandered through a lot of this, exploring. This is something I use in my work on academia as a system as well, is that we have to have space to explore. And a lot of it is serendipity when you give yourself the space and the time to explore, which is very important in my own trajectory. What would  </p>



<p>Paul    00:08:55    You say right now is the balance between your philosophical work output? Let’s say and empirical, because I know you’re working on multiple philosophy, uh, manuscripts,  </p>



<p>Yogi    00:09:08    I just completely left empirical science. My lab shut down in 2015. Um, I’m still carrying on some of the specialized work in evolutionary development and evolutionary systems biology, uh, through our work, uh, on, on concepts of process homology, modularity, dynamical modularity, and so on and so forth. But, um, I would say I’ve moved on, especially in my scientific work with what I call philosophical biology and interested in the concept of agency and its role in evolution, which is probably something we’ll talk about today. So I I’ve taken a turn, um, an irreversible turn away from empirical science, I would say.  </p>



<p>Paul    00:09:48    Okay. So let’s, let’s talk about, um, yeah, you mentioned agency, um, and its role in evolution and that’s kind of the focal point and end point of your course beyond networks. One of the things that I found interesting that sort of lit me up was I just see this parallel between what you’re talking about in, in tying together, genotypes and phenotypes, and how to understand evolution in the complex systems that we have and how development plays a role, uh, in that process and using dynamical systems to, to model that. I just see this massive parallel with what’s going on in modern neuroscience as well. So that’s why, uh, I thought immediately, oh, I’ve got to have you on, um, because I wanted to explore this and I haven’t thought deeply about making, you know, super close ties and, and exploring what it means for neuroscience, but this is something that, uh, I want to pursue further. So, um, I don’t know if you, so I don’t know how familiar you are with the modern landscape of neuroscience or at least, you know, one, one facet of it that we talk a lot about on the podcast, but maybe, maybe what you could do is just give a really broad overview of this, uh, of the course. And, and then we can go from there  </p>



<p>Yogi    00:11:08    In two or three sentences,  </p>



<p>Paul    00:11:11    Two or three sentences, of course, by the way, I should say this thing is 12, it’s like 48, uh, you know, 30 to 40 minute videos. Uh, so it’s, and it’s super rich with historical perspectives, quotes, philosophical perspectives, and, uh, the modern science of evolution and genetics. So yeah, take it away.  </p>



<p>Yogi    00:11:32    The executive summary of this night, I was aware I’m superficially aware of what’s going on in neuroscience, through colleagues that are engaged and they work there. And I am of course, aware that a lot of the arguments I’m making in my lecture apply, um, as well, it’s not published in the field, but the central point, I guess, is that I was interested in, in, um, the limits of, uh, limitations of dynamical systems modeling because I was always claiming that I am a process thinker that, um, you know, everything explanations has to be more focused on processes and biology. And this is really important in, um, the field of genetics, genomics, but also in neuroscience because of this increasing pervasiveness of, of networks that you see everywhere and they pop up everywhere and often they’re, they’re just, you know, sort of hairball graphs and systems biology with lots of nodes and connections.  </p>



<p>Yogi    00:12:30    And it’s, they’re put in front on a slide and, you know, they’re, they’re presented as an explanation of what’s going on, while they, they don’t really explain anything, right? That, that was one of my problems. They just show that the system is complicated. I wouldn’t even call it complex. And so, um, I became frustrated with this and that brought me into contact with people very early on through my master’s supervisor, Brian Goodwin, and then my PhD supervisor, John Reinitz, who was pioneering this approach of using, um, dynamical systems models with data, um, to, to, uh, describe the actual dynamics function and evolution of gene regulatory networks. And this combination of empirical and theoretical work really appealed to me. It was really new. This was before systems biology was called systems biology. We called it functional genomics, what we were doing at the time. And I claimed, I went around and said, we’re really looking at this in terms of process, but I soon became aware that the methods we were using are also still very much rooted in this network view of living organisms.  </p>



<p>Yogi    00:13:41    And if you, if you look at living organisms, what they do is they, they changed their structure constantly. So we capture a specific structure in a, in a dynamical system model. And from, from that point of view, it’s still static, right? You have the equations that describe the interactions and those interactions are fixed. So, uh, I became interested in, uh, went back to work I did during my master’s thesis, uh, where I got in touch with, uh, the work by Maturana and Varela. And I read a lot of Varela. I did a master’s in holistic science in the Southwest of England, a little hippie college called Schumacher College, read a little Varela and got in touch with, uh, uh, embodied cognition and enactivism. And so it’s a funny twist. I basically took those ideas that came from, from neuroscience, cognitive neuroscience, into the field of genetics.  </p>



<p>Yogi    00:14:38    So it’s come full circle. And through that, I, I became aware, uh, and sort of through my employment at the philosophy of science Institute, uh, got to know, uh, Alvaro Moreno and Matteo Mossio, who are doing work with agency, organizational accounts of organisms, and their theory is very much rooted in Varela’s work. Uh, and, uh, shows that that the essence of biological organization is in the constantly changing structure of the organism. It’s the self-making, autopoietic aspect, is that, um, it never stays the same. It’s like the Red Queen in Alice, which has to run to stay the same. It changes all the time. And so, uh, there is this old argument going back to theoretical biologist, Robert Rosen, that you cannot actually model this sort of organization. And it’s very controversial. And I became interested in these sort of questions because, uh, here you’re really a process thinker. And I think that’s really important here. You need to get, let go of those, those fixed structures. I mean, we, we, we can only study small aspects of development and evolution using dynamical systems theory, but we cannot capture the agency of the organism. And therefore, I think that’s crucial for neurobiology. If you come back to that, um, it’s, you need a dynamic approach basically, but a dynamic approach that is radically dynamic, not like dynamical systems. Right.  </p>



<p>Paul    00:16:14    Well, I mean, you know, as I was saying that I see the parallels between how you’ve used dynamical systems theory within this limited, uh, um, approach to, you know, model the developmental process, et cetera. And so in neuroscience, dynamical systems theory is all the rage right now to take a whole population of neurons and, uh, figure out what they are doing through there, like a trajectory through these lower dimensional spaces, uh, and map that on to eventually to behavior. So that was kind of the, the parallel that I saw. I, I wonder if I should, um, you know, what I’m going to do, I’m going to interrupt our conversation because, uh, since we we’ve talked about dynamical systems theory, so, so I’m going to play you a question from a guests, uh, before we move on. So I had, I had Kevin Mitchell on, we didn’t talk too much about his book innate, but in the book innate, um, it’s all about how development has had, you know, gotten the short shrift in the story of our genotypes and how that leads to our behaviors, our phenotypes and our behaviors anyway. So I thought he would be a good person to ask to, come on and ask you a question. So I’m going to play this question for you, and then we’ll continue our conversation.  </p>



<p>Speaker 3    00:17:23    Hi, Yogi, Kevin Mitchell here. I’m a big fan of the holistic non reductive approach that you and your colleagues bring to biological questions, which feels very rooted in principles of process, philosophy, and system thinking that were popular for a while at various stages in the 20th century, but which were then replaced with a very mechanistic and reductionistic outlook. It feels to me like holistic dynamic approaches are gaining traction again, probably because we now have experimental and computational tools to generate and deal with dynamical datasets. And I wonder if you feel the same way in the perception your own work is getting  </p>



<p>Yogi    00:18:00    Yes. Thank you, Kevin. Um, yes. Uh, I do feel that way. And as I just said before, being dynamic, even going beyond dynamical systems, beyond fixed structures, this is extremely important. And I think there is a, there is a big revolution coming at some point. It’s a little frustrating to see how slowly it’s catching on. A lot of the empiricists have problems seeing the practical use of these things, because they’re, you know, these ideas are still very theoretical and a big challenge is to bring them to the bench basically. And that’s, you know, even though I work theoretically now exclusively that’s one of my big aims is to work towards getting those theories in, uh, the range of empirical tractability. I think that’s extremely crucial. Uh, I think Kevin is a very optimistic person and I like that. And also I, it’s nice to see that this work is seen among those people that really matter, but I wouldn’t say it’s gotten a lot of traction in the, in the mainstream of, uh, genetics or developmental genetics.  </p>



<p>Yogi    00:19:14    Um, and it’s a bit sad to see how theoretical work is massively undervalued in those fields. I think one of the reasons is the technological progress has been so fast and the temptation to just produce datasets and resources has been overwhelming to a lot of people. So that we’ve forgotten a little bit about what were they earlier called philosophical biology. And I think it’s very important to get back to those conceptual questions now, and I’ve been trying to get people to sort of notice and get interested in it, but it’s really hard. I think it’s also has a social dimension of everybody’s under a lot of pressure to, to just produce stuff and these sort of questions that aren’t very conducive to, you know, career basically in today’s academic system. I have to say,  </p>



<p>Paul    00:20:02    Well, thanks Kevin, for the question. So sorry to interrupt us. Cause, uh, I wanted you to continue talking about, about the big ideas, um, from the course before you do that though, I am going to interrupt us again because a big thing that you talk about is process based philosophy, a process metaphysics, a process approach. And I love, uh, process metaphysics. I still find it, uh, substance metaphysics, the idea of things is so ingrained and so trained into me that I, I still have a whole lot of trouble thinking of things in processes. And I’m wondering if that gets easier, if you think of everything in processes or if you still struggle and think of things as think of processes as  </p>



<p>Yogi    00:20:47    Right. So, so yes, that’s a very good question. I mean, you don’t right. I mean, so basically there’s a beautiful work by Lakoff and Johnson, for example — Metaphors We Live By, the book where they, they describe something that they call the containment doctrine. They can show that very early in your, your childhood. Um, you, you form this, this vision of the world is, you know, basically I call it the Tupperware model of reality. It’s basically boxes within boxes, containers within containers. So that’s very, very deeply ingrained. And what is important here is to say that for a lot of questions and topics, you do need process based explanations, a process based approach because it is very hard there’s I think Quine was the first philosopher who brought up the absurdity of it all he said, you know, you’d have to sort of change language, for example, right?  </p>



<p>Yogi    00:21:44    Subject object, sort of, uh, he used the sentence ‘the white cat is bristling towards the dog’ — it’s ‘catting whitely at a bristling dog.’ And so that’s taking it ad absurdum. Also Borges has, has a beautiful argument about this in his short story — ‘Tlön, Uqbar, Orbis Tertius,’ it’s called. It’s a beautiful story. And so you don’t have to, uh, get rid of your language, your thinking, but you have to realize that sometimes this very deeply ingrained pattern of thinking is hiding, um, aspects of phenomena, of questions. It’s preventing you from asking questions that just don’t occur to you. If you think, uh, like this, for me in genetics, it’s, it’s very, um, strong because you have this idea that you can explain processes, developmental processes, behavior in terms of genes, which are things genes are. So they’re like pearls on a string, right?  </p>



<p>Yogi    00:22:51    And so you have a huge gap between how has that same causing any, any sort of behavior or phenotype. And that’s been neglected for a long time because we are happy with saying, you know, this gene does that, but does that even mean, right? And these are quite obvious questions that are also beautifully treated in Kevin’s work. I have to say, um, that have often been, so obviously in our face that you didn’t see them anymore sometimes. Uh, and there’s a beautiful Whitehead quote from philosopher Whitehead, where he says that it’s exactly those things that are so obvious, obviously not right, that we don’t even see them anymore. And these are the things that you have to, um, it’s actually, I used to word thing all the time. It’s terrible. So yeah, it’s exactly those aspects of reality that questioning, if you question one of those and really find something, then that’s how we’re going to dig deep insights occur. Of course, some changes in how we perceive the world. And I think that’s very true. So this is one of the challenges here is to see where, and when do you use process oriented thinking process explanations, and when is it okay if it’s okay. In many aspects, in many areas of life and science use substance based explanations, that’s fine.  </p>



<p>Paul    00:24:17    Yeah. In science, we’re very concerned with definitions, right? So what’s the definition of a gene. And, uh, so on the podcast, we talk a lot about intelligence and natural and artificial intelligence. And I feel like when you name something like intelligence, it reifies it. And all of a sudden it seems like a thing. And you know, I don’t, we’ll get into this later, uh, about, you know, whether what kind of thinking process thinking or otherwise to apply to these sorts of things. But I feel like the entire world of intelligence, natural artificial, whatever those words, you know, whatever intelligence means it, uh, is really would benefit from a process based approach.  </p>



<p>Yogi    00:24:58    Absolutely. I mean, the one thing that got reified and has a really negative impact is information. I mean, these sort of absurd claims that information is just as fundamental as substance or whatever, you know, it’s, it’s something it’s a way of looking at the problem, right? If you use an information-based approach, you have a certain way of looking at a problem, but it’s not like the university people, some people say the universe is made of information. Does that mean it’s just completely meaningless? And so I guess I agree with you, we have to think about what do these terms mean and what kind of work. I mean, the, one of the really important practical aspects of doing philosophical work in science is to check concepts, to examine what is the work they do. And a lot of the concepts we’re using don’t do any, any real work at the moment. So I think that’s, that’s one of the reasons we have to sort of rethink that’s what I tried to do in the lecture to question a lot of those concepts that we use everyday, or actually a lot of them are just metaphors, you know, they’re not defined beyond the genetic program and so on and so forth. These are metaphors that we use very carelessly without realizing that they’re metaphors anymore.  </p>



<p>Paul    00:26:13    All right, well, I really derailed us, but let’s get back to, uh, agency and the idea of, you know, um, closure and well, I’ll let you, I’ll let you continue about how you think of agency and its role in evolution.  </p>



<p>Yogi    00:26:27    So this is actually stuff that came out of making this lecture. The lecture itself has been a process. I’ve been giving this lecture for years at the university of Vienna to about 15 students before it actually forced me to record it. And then I thought, why not put it up on YouTube? So that’s what I did. Then that’s viewed by hundreds of people, which is fantastic. All who have taken the time to do that. So intellectually I sort of came to agency very late, right? Just like in my career. And now I’ve sort of, uh, really interested, uh, in, in the role it plays in evolution. So here’s the big historical movement from Darwin Darwin’s theory of evolution was a, uh, theory of struggle for survival of the individual of the organism organism based theory. Um, it had its very big difficulties, right? Had no mechanism for inheritance and it had no mechanism for the production of phenotypes either.  </p>



<p>Yogi    00:27:33    So this theory was then, uh, completely transformed in the twenties, in the thirties, through the modern synthesis and the rise of population genetics, which bracketed the organism. So it looked, uh, below the organism at the genes and it looked above the organism at the population level and it completely forgot about the organism. The organism just became the sort of interface where population level and genetic level interact. It had no importance. And of course, even Darwin knew, uh, that, that the behavior of organisms actions, choices that organisms make have, have really important consequences for evolution, but it’s become a really big taboo topic. Uh, not just because we, you know, the term mechanism, mechanistic explanation is often mistaken nowadays as meaning you have to explain something at the level of molecules, the molecular gene. And I think that’s, that’s just crazy. We’re dealing in the life and neurosciences with hierarchical multilevel systems that need to be explained at multiple levels.  </p>



<p>Yogi    00:28:40    So, and there’s no scientific reason to focus on only one level. That’s just a historical thing that happened. Right. So I think I’m interested in, in these higher levels. And I was, I was thinking, what does the organization, what role does organization play in evolution? This sort of weird, uh, biological organization. So let me, I probably have to talk a little bit about this. So please do this notion of closure, organizational closure, um, is an idea that you have to basically account. It goes back to Kant. It’s a very old idea. It was explicitly formulated by, um, the developmental psychologist Jean Piaget. And the idea is that you account for all the causally important factors from within the system, basically everything that you need to continue is, has to be produced from within the system. Now it’s very important to make a distinction between that sort of closure and a thermodynamically closed system, right?  </p>



<p>Yogi    00:29:46    So systems with organizational closure have to be thermodynamically open. They have to have a constant flow of matter and energy through the system. Otherwise they cannot achieve this organizational closure and what it ultimately amounts to is that you have a life cycle, right? So you have your, at the end of the life cycle, you have produced something that looks a little bit like you. And that is one of the fundamental principles of evolution. You need a principle, not just a variation, but you need the right kind of variation and you have to pay a lot of attention to that. Uh, if you want to be truly evolvable, a lot of people have pointed out that very simplistic concepts of evolution like replicators, you know, reductionist accounts that were, were pioneered by really smart people like, uh, John Maynard Smith, others like Richard Dawkins, uh, you can have a naked replicator that just makes copies of itself.  </p>



<p>Yogi    00:30:41    It was pointed out really early on in the seventies, actually, um, by, uh, Manfred Eigen, that you get something that is called an error catastrophe. So if you have a molecule that replicates itself, you get errors and the errors are just linearly, accumulating. And at some point, if you have a exponential, an exponential copying mechanism, you just get errors. I mean, there’s no way to maintain a species that could be selected for, um, like that. At the other end, you have self-organization work that is about auto catalytic systems, for example. So self-organizing systems that maintain themselves in a certain state, but they can’t vary either, right? Because as soon as they vary, they’re no longer autocatalytic. So they no longer reproduce themselves, by definition. And so you have no variation then to consider. And so this is a really tricky problem.  </p>



<p>Yogi    00:31:36    And so I realized thinking about this, that you really need the sort of life cycle that organisms have, and that life cycle depends on organizational closure, this specific sort of the snake that bites its tail organization of living beings. This is a very superficial description of a very complicated theory here. Now, do you also get with closures agency? Okay. So what you have to imagine is that if you look at it from a dynamical process oriented point of view, your current state as an organism, depends on past states off of the environment, you react to your environment, but also on your past states and those of your ancestors. So partially at least your current state can only be accounted for through previous states that you have internally. So a lot of it depends on causes that come from within, and that’s exactly what it means to act, to have agency, right?  </p>



<p>Yogi    00:32:33    So true, a true concept of agency. And I’m not talking about people who want to explain it away by it’s nothing but information process sustained processing. It is not like that. It is something that, you know, you have agency when some of your actions are caused from within your own system. Um, and that can only happen if you have organizational closure. So if you have that closure, you have a certain basic, uh, agency. So I’m not talking about making decisions, conscious decisions at all, that that bacteria has this type of basic agency so that the bacteria can in a way, decide to swim up the gradient by triggering certain behaviors in a very stochastic way. And it has a very limited repertoire of actions, but still it makes we don’t even have the language to talk about this. It’s very dangerous to say it makes decisions.  </p>



<p>Yogi    00:33:25    It’s not thinking about, but it’s internally generated. And it has a selection of different behaviors that it can do otherwise. And it can only do that because it has, um, closure. So basically what we’ve connected here is the principle of systems that are evolvable have to have this closed life cycle, but these systems have to have this particular type of organization, which automatically comes with agency, a certain kind of autonomy from the environment. And so basically agency and, uh, evolvability go hand in hand. So the claim in my most recent work is that you cannot even get evolvable systems that are not natural agents. And so then it becomes really important to ask what does that degree of freedom, um, entail for evolutionary theory? I mean, it has really big consequences. Think about niche construction: humans altering the environment instead of altering themselves. So we adapt through changing our environment, uh, completely more or less nowadays.  </p>



<p>Yogi    00:34:34    This may not work out very well in the end, but all these sorts of additional, um, dimensions are completely excluded. If you look at, uh, just the genetic level. And that’s something that a lot of critics of reductionist evolutionary theory are saying, but here we have a specific perspective that I call an agential perspective. So it’s not just a process perspective, but it’s a specific type of process perspective on evolution, which allows us to ask different questions to make these questions also a legitimate to ask, because there’s a big taboo about asking, you know, what does, what the organism wants? What kind of role does that play in evolution? You know, immediately dismissed as some sort of mystical, uh, teleology, but that’s not the point. That’s exactly right. It’s, it’s, it’s taking the phenomena seriously and trying to come up with a scientific explanation, but this explanation may not be mechanistic for reasons that I explained in the lecture series again, because, um, these sort of behaviors are not necessarily explainable in mechanistic terms. So it’s not unscientific. I repeat that it’s not mysterious or anything, but it’s not, it will not be considered a traditional, a mechanistic explanation, which is just taking the system from state A, through some causal chain. Okay. Because it has this sort of snake bites, its own tail causal structure, which is very convoluted. It’s not obvious how to deal with it in mechanistic terms basically,  </p>



<p>Paul    00:36:10    But in your, in your lecture series, you, um, you, you would talk a lot about complex systems and how difficult they are to understand. And that’s why a multiperspectival approach is beneficial. Um, one of the things that you talk about is near-decomposability from, uh, Herbert Simon, right? And, uh, and where everything is connected and everything is affecting each other, but some things are way more important and some things are less important. And I’m wondering if, if you had to speculate this agential perspective in evolution, how important would it, you know, will it end up being to the, the process of evolution or as you know, among the multiple explanations of evolution, will this, uh, will the agential perspective be just a tiny thing? Will it be the main, become the main driver, uh, of, uh, explanations for evolution?  </p>



<p>Yogi    00:37:02    I have no idea. The thing is we haven’t asked, we haven’t looked right. This is my problem. I’m not, so here don’t get me wrong. I’m not saying, look, this is agency is crucial for evolution. I’m just saying, I can make an argument. That systems that are evolvable have agency that coincide. So, so if you have an agent it’s evolvable, if it’s evolvable, it’s an agent. Um, so it’s worth looking at that. This has not been looked at these questions have not been asked, so we don’t know the answer. It may well turn out that it’s completely unimportant, but I’m not saying you can’t just say that a lot of reductionist geneticists, just say, I know it’s not important. No, macroevolution is just an extrapolation of population level frequency shifts. And that’s just made up, there is no argument scientific argument behind that. That’s just a simple extrapolation, linear extrapolation, and nobody ever has done any serious work on this. So that’s what I’m saying. It would be worth exploring, you know, in the end, you know, near-decomposability is nicely illustrated by astrology, right? I mean, astrologers have a point, when they say the planets do influence your relationship. Sure. But the people who you have the relationship with, they’re probably much more important for your relationships.  </p>



<p>Paul    00:38:26    What’s your sign, Yogi  </p>



<p>Yogi    00:38:29    Pisces.  </p>



<p>Yogi    00:38:35    Yeah. So, you know, everything is connected to everything, but a lot of this stuff is really these interactions are really not important for, for, you know, whatever question you have. This is the basic principle of perspectival realism, is that you’re, you’re going at reality with a certain angle. And you have a certain question. And in that context, some perspectives are better than others. And this is not relativism. Like, you know, some post-modern thinkers say everything is just discourse, all kinds of knowledge are the same. Perspectivism is not that; it says that for certain contexts, certain perspectives are better than others, give you more sound or robust, trustworthy knowledge, but you cannot extrapolate that across all circumstances ever. You know, and that’s basically my favorite definition of a complex system is a system that will show ever new properties in new contexts, right?  </p>



<p>Yogi    00:39:30    So you basically can never list all the possible properties of a complex system in advance. That’s an argument that is strongly making. And so you get true radical emergence in evolution, true innovation. It’s fundamentally unpredictable, which I think is a great thing. I would really not like to live in a clockwork sort of Laplacean demon universe, where everything is determined. And so this is a beautiful view of a process-oriented open-ended view of the world. That is, again, going back to Alfred North Whitehead and what he called a philosophy of organism, which was a metaphysical system that viewed the world, the universe, as more like an evolving process, an evolving organism, than a mechanism. I think it’s, there’s nothing dodgy or mysterious or unscientific about that. We can have a science that is compatible with that, and it would be much better to study evolution using that sort of science than the traditional mechanistic approach.  </p>



<p>Paul    00:40:37    You, you argue also that this agential, uh, perspective is in line with this open-endedness, what you’re calling radical, uh, emergence, uh, through its interactions with the environment, because it changes the environment and that environment changes the agent. And that, and there’s this, this interaction where the adjacent possible that Stuart Kauffman, um, uh, preaches about, uh, is this dynamic ongoing open-ended process  </p>



<p>Yogi    00:41:05    That’s right. Preaching, but he’s, he’s got the point, right? I mean, so, yeah, I really liked the argument that, uh, Bill Wimsatt is making, he has a book that’s called Re-Engineering Philosophy for Limited Beings. And in the first chapter, he’s sort of taking, uh, it’s called, uh,  missions. And basically he says a lot of the theory of knowledge and science through the ages has been made for an unlimited Laplacean demon who knows everything about the universe and its future and its past. We are not that; we are limited beings that are in the universe. And any limited intelligence needs a different kind of approach to epistemology, to the theory of knowledge. And he builds that perspectival approach out of this argument. And I think this is very powerful and, a bit ironically, you end up with a much more realistic, uh, theory of knowledge than if you, if you base it on certain sort of dreams of a final theory, as physicists sort of do.  </p>



<p>Paul    00:42:09    Yeah. So there are so many different ways we can go here. I’m just, I’m itching to bring up the, this, the AI, uh, work that you’re working on with Stewart. But I want to ask, because you talked about how a bacterium is an agent and how we are agents, but to be an agent you don’t need any consciousness. You don’t need any awareness. Where do you see a role for brains and or minds? If you do see a role in this, you know, agential open-ended evolution perspective.  </p>



<p>Yogi    00:42:42    So I see the, the zone between agency, basic agency and cognition, cognitive agency, as a sort of a gradient, I think nervous systems evolved on this sort of foundation of basic agency. One reason for it is, is to enrich, uh, the repertoire of actions that you can select from. Of course you have, if you’re mobile and you have a nervous system, you’ll have a lot more choices than, say, uh, a bacterium or a plant. And that created its own sort of dynamic of evolution in that sense. So I think, and this is why it’s so important to build a vocabulary for agency without consciousness, because the problem is if we muddle the two, then we get to panpsychism and stuff like that. Let’s just say that I’m not sympathetic.  </p>



<p>Paul    00:43:40    Okay. Very good at that. Yep.  </p>



<p>Yogi    00:43:43    That’s all I have to say about that. So I think that’s precisely an example of where we mistake. Uh, we were used to think about phenomenon that come out of agency in the context of consciousness, because that’s how we work. Right. And so it’s really hard to abstract this way to, to, uh, organisms that don’t have consciousness or cognition. So that there’s a few, I think it’s  </p>



<p>Paul    00:44:12    Bizarre. And this number, perhaps  </p>



<p>Yogi    00:44:15    I don’t think it’s, yeah, I think it’s, uh, if anything, it’s not helpful, you know, it’s, it’s maybe not helping because it muddles, uh, discussions that we have to have about, uh, consciousness about free will. These are really hard. I, I don’t know what consciousness is. I don’t have a particular opinion about  </p>



<p>Paul    00:44:32    That. It’s a thing.  </p>



<p>Yogi    00:44:33    Of course, of course, it’s a thing. I would like to understand basic agency before we move on to consciousness. And, uh, there, I’m very careful, you know, this is just a personal choice, because I think the questions that we ask about consciousness are not really well posed, and I think we are now at a sort of stage in history, time in history, where we can start to ask well-posed questions about agency, but we lack the vocabulary. All the vocabulary we have about, you know, making decisions, selecting behaviors, all of that is based on what we do as conscious beings. Of course it muddles the waters. It makes it sound more, um, hokey pokey than it is. And often you confuse people because you seem to be making the claim that, that everything is consciousness. Which I definitely don’t agree with.  </p>



<p>Paul    00:45:30    Let’s talk about AI then, because, uh, so you and Stuart Kauffman are co-writing this manuscript, uh, where you sound the alarms about how artificial intelligence is gonna take over the world and kill us all. Is that right? Do I have  </p>



<p>Yogi    00:45:43    More or less? So we’re discussing at the moment. And also I want to mention, uh, Andrea Roli, an AI researcher from Italy, who’s involved in our conversations, a three-way conversation. And we we’d like to have this discussion in a publication. Of course. So here’s the thing about, uh, organizational closure, this weird organization, um, it gives you agency. So basically you could formulate it in different terms. You can say it allows you to want things. Uh, again, we don’t have the right word. So a bacterium doesn’t want things in a conscious way, like we want things, but it still goes for the food. You know what I mean? So it’s, it’s sort of a truly goal-oriented behavior. And the argument is basically simply to say that you cannot make an algorithm or an AI just translates to the question about cost, function, choice to begin with, or even to, to choose or not, whether you want to optimize the cost function or not. So the argument is that in, in AI, first of all, all agents as an AI, uh, researcher would call them are simply algorithms, input, output processing procedures, and the argument of organization, the organizational account of organisms is that organisms are exactly not like that. They are more than that. So because they can cause actions from within the system, they have a degree of freedom. Um, somebody, I forgot the name of the author of the paper called freedom from immediacy. They don’t, uh, organisms don’t have to respond to the environment,  </p>



<p>Paul    00:47:27    Gold and Shadlen, I believe  </p>



<p>Yogi    00:47:29    That’s right. You’re right. So freedom from immediacy. You don’t have to, it’s basically an argument about, you know, you could have done otherwise. Some sort of, again, it’s not the argument about free will — free will is something more evolved, conscious. Could the bacteria have behaved in a different way? Basically the answer, if you believe in what I would call a strong concept of agency, is yes, there are, uh, there’s a freedom there, a degree of freedom. So I think that’s very important. Now, the argument that we’re trying to make is that this degree of freedom is not algorithmic because it’s not formalized a hundred percent. And there are several arguments that have been made in the past, uh, about this, uh, one of them by Robert Rosen, uh, the first one, who is often misunderstood as saying that you cannot model an organism, and it’s been shown that you can make a simulation of an organism that behaves like an organism.  </p>



<p>Yogi    00:48:28    You have to use a recursive sort of functional programming paradigms to do this complicated. You can’t just do traditional consistency theory, but you can do it. But Rosen’s argument wasn’t about that. He said, it’s not complete. So it’s an incompleteness argument analogous to Gödel’s incompleteness theorem in math. Gödel showed that number theory is incomplete; that doesn’t mean you can’t use numbers, right? It just means that it doesn’t capture all possible statements about numbers. And this is the same argument. So basically you can make a model of the organism, but the organism can always surprise you because it can have, it has this degree of freedom to act in a way that it’s never done before, because its action right now, its state right now. It depends on its entire history that we’ll be sharing history in the end. And so its behavior is fundamentally unpredictable unless you know, this entire history, which is impossible.  </p>



<p>Yogi    00:49:25    So that’s one argument. The other argument, it’s complementary to that. And he says, you simply cannot predict all the possible functions of a, of a complex system. So he takes a screwdriver as an example; it has been designed to tighten bolts, but you can also use it for all kinds of other purposes. You can pry a door open with it. You can pick your nose. I think it’s same as well, whatever the context of its use, it’s always different and ever evolving into the future. It’s never the same. You, this is a radically context-dependent property, function, the function of the screwdriver. And so as soon as a thing, an organism or a process, has a function, you, you cannot predict all the possible functions anymore. And so that brings you this, this sort of adjacent possible view of radical emergence, but you simply cannot predict the specific context before it actually happens.  </p>



<p>Yogi    00:50:33    There’s another beautiful book that I recommend, uh, by, uh, process philosopher, Nicholas Rescher, where he makes the same argument in the context of discoveries in the sciences. As if you could predict the specific discovery in the future, then you would have already made it. So there’s a logical paradox there. So specific discoveries in the future are facts that are fundamentally unknowable; you just can’t know them, because if you do, then you’ve already made it. And then this is not a future discovery anymore. So I really like this sort of view. So we’re tying together these, these arguments to say, okay, so we cannot have, um, this sort of open-endedness, this, this sort of surprise element in the behavior and the evolution of organisms in an algorithmic system. And since all our AI agents are algorithmic, um, they cannot actually do that. So the argument is basically, maybe you can call it a strong and a weak sense of agency.  </p>



<p>Yogi    00:51:35    The weak sense of agency is simply algorithmic information processing, and you can make an argument that the, what makes biological agents true agents goes beyond that. And therefore you cannot have this sort of artificial general intelligence, think Skynet, that suddenly wants to exterminate humanity. It’s not going to do that. Why would it want to exterminate humanity and how do you program it? It will always be limited in some way, by the way, in which you set up the, the AI system in the first place while living systems are not, um, they are also constrained, but they can break through those constraints eventually, uh, through evolution while an AI simply can’t do that because it’s algorithmic,  </p>



<p>Paul    00:52:21    You create, um, like someone like Ken Stanley, um, and you know, lots of people working on evolvable systems.  </p>



<p>Yogi    00:52:30    So these systems are always evolvable in a limited way. I mean, this has also, I mean, the failure of artificial life, you know, that the sort of, why, why do these evolutionary simulations always get stuck? It’s my strong suspicion that it is because these agents, again, that’s a bit of a misnomer. I always scare quote, the term in this context are not true agents. And so I do suspect, of course, I’m biased here, but it would be interesting to look at the role of agency in evolution, because I don’t think you can get this sort of open-ended evolution without agents. And this is what I talk about when I use the term evolvability here. It’s a very specific sense, a very strong sense. So true innovations that are not just, some are making the argument that evolution is just sampling from a huge platonic space of ideal forms, which is bizarre. And I think Kauffman’s argument is directly opposite, uh, this sort of view. I like it much better because it’s process oriented and we don’t have this pre-existing space of possibilities, which you cannot formulate.  </p>



<p>Paul    00:53:39    Is there a room for good enough AGI or good enough AI that we would be satisfied, but I don’t really even know what the goal of AGI is. So if you asked different people and they have different answers, but, um, you know, w will we be satisfied that we’ve created something good enough using something like, you know, uh, reinforcement learning algorithms, right? Where you, you are, you know, you’re still externally giving the objective function, um, as it’s, as it’s, as the agents scare quotes motivation. Right. Um, you know, a lot of, you know, a lot of people are talking about reinforcement learning, being enough to get to AGI. Uh,  </p>



<p>Yogi    00:54:17    Yeah. So the question is, what do you want? I say, I, it’s not clear to me. I mean, you can, you know, what is it, Eliza passed the Turing test for a few minutes with lots of people. So that’s, I think it depends on how you set the bar. And also if you want, I mean, we’re happy with imitations. So AI can produce a lot of imitations of creativity and true life. And we’re very convinced by that because it’s very good at doing that. And so the question again is why would you be wanting to do this? It’s just, I think for me, this discussion is important because if you read around in rationalist circles and all that, a lot of stem, uh, there’s a beautiful book called The Precipice, um, which is listing all the different, uh, existential risks to humanity right now. And then always near the top of the list is this, um, general AI sort of replacing us. What will kill us is Facebook, you know — watch The Social Dilemma — don’t fret about Skynet  </p>



<p>Paul    00:55:21    Is  </p>



<p>Yogi    00:55:24    So that’s much more immediate and much more dangerous and Skynet, don’t worry about Skynet. That’s not going to happen anytime soon. You may actually have to evolve synthetic life to get that, you know, as sort of, uh, an AI implemented in a synthetic life form to get that, you know, but now we’re talking about science fiction.  </p>



<p>Paul    00:55:42    Well, well, right, but that’s, yeah, I was going to ask you about that. I mean, you know, so is life a necessary, um, precursor to intelligence or do we need to reconceptualize what we mean by the thing we call intelligence? Do we, I don’t even know what the hell we’re talking about, to be honest.  </p>



<p>Yogi    00:56:01    Yeah, it’s very, I mean, there are arguments back and forth. Whether you could implement the principles of organization of living organisms — they are not necessarily dependent on the material substrate. Of course, the material substrate needs to have certain characteristics, and the argument has been made by Alvaro Moreno and Kepa Ruiz-Mirazo among other people that you need, uh, an organic substrate to get that; you can’t implement that mechanically, it’s just not feasible. But I, I wouldn’t, uh, venture too much out on that limb, you know, maybe we develop some kind of, you know, gray goo technology soon that, that can do it. I don’t know. What we’re arguing, with Stuart and Andrea here, is that, um, you need a completely different architecture of your AI. And at the moment, uh, with our current technologies, I would say the only thing that can do it is a living cell, a living organism.  </p>



<p>Paul    00:57:04    What do you think? Um, if you had to, again, speculate, you know, how far into the future do we need to go until we can artificially create it with materials and new architectures?  </p>



<p>Yogi    00:57:15    So that depends on what you could call strong synthetic biology, right? I mean, at the moment what we’re doing is we’re doing, replaying some electrical engineering equivalent in organisms. We try to predict what the circuits that we built into the organisms do, and then most of them were wrong. So the true aim of such a synthetic biology would be, of course, to, uh, synthesize, um, a living organization from scratch because, you know, these sort of publicity stunts by Venter, for example, saying, oh, you know, we, we’ve synthesized the genome and then put it in a cell — and they cheated in two ways. First of all, the genome was based on existing genes from a host organism. And of course the cell they transplanted it into was a living, uh, same thing again, it was a living organism. So, so that’s not creating life at all.  </p>



<p>Yogi    00:58:10    So it would be to, to synthetically produce a, uh, organization with closure that shows all kinds of signs of life and has agency in this strong sense that I was trying to convey, uh, in my lecture and my recent work. So, uh, I think that is something that current AI certainly cannot do. I mean, if we talk about what we’re doing right now, this is impossible to do, but I, I don’t want to speculate — whenever you say it’s impossible to do something, technologically, someone comes along and does it. So, uh, whether we will have in the near future, such artificial organizations, life-like organizations, this is a very open question. That’s a nice challenge for engineers and biologists out there, and that’s a bit Frankensteining. It has to be regulated quite a bit in my opinion, and we’d have to proceed with extreme caution, also not to release this kind of stuff into the environment.  </p>



<p>Paul    00:59:07    Hmm. I know you’re not a neuroscientist or a, an AI practitioner, but a lot of the people I have on the podcast use deep learning networks, which is a large part of current AI to study what’s going on in brains. And, um, you know, make a story about how, uh, the, the network and its dynamics are similar to brain dynamics. Um, do you have thoughts on that kind of approach or do you see it as fundamentally limited in the same way?  </p>



<p>Yogi    00:59:35    So I see, I see it as fundamentally limited, and it’s a, it’s a huge step forward to just sort of looking for certain circuits in the brain and say, this circuit does this, and this does that this circuit doesn’t do anything. You can run different processes on the same circuit. So in that sense, if you’d step forward on the other hand, it’s, again, it’s just a very fixed, uh, you know, traditional sort of dynamical systems approach that would not give you true, uh, autonomy as cognitive processes, because it’s just algorithmic it’s limited in that way. It’ll, it’ll allow you to simulate probably a lot of the aspects of cognitive processes. So I have to say, uh, and this is very important that, um, a large part of an organism, a large part of our brain works in mechanistic way. So we can get a long way by studying these systems, dynamical systems approaches.  </p>



<p>Yogi    01:00:30    So I think we can definitely make pragmatic progress, uh, even great progress, with these sorts of approaches, but they will not in the end, give us a complete, ever give us a complete picture or a very deep understanding of how brains work, because they’re intrinsically limited, uh, to mechanism, to algorithm. Uh, and for me, I mean, if we can show that even a simple bacterium has agency that is not algorithmic, then we don’t have to discuss whether the brain is a Turing machine anymore. You know, some of the processes that run in the brain may be like computation in Turing’s sense, but the whole brain, the whole organism — why should it be captured by, by this limited technological metaphor that we’re using here? Right? I mean, it makes no sense. So I think the burden of proof is simply on the people that claim that to be true, and they often say it’s evident, but it’s not. I don’t think it is. I’ve never seen a convincing argument for it.  </p>



<p>Paul    01:01:30    I mean, there are people like Mark Bickhard who kind of rail against computationalism, right? The computational approach to understanding, but you have to admit that it has been one hell of a successful perspective on advancing our understanding of at least certain aspect, those aspects that you’re talking about, that you can understand in mechanistic terms,  </p>



<p>Yogi    01:01:49    I have colleagues in evolutionary biology who go against molecular biology and molecular reductionist approach is saying we’re against that. That’s absurd. So this is a very successful science and it’s brought us a lot of really interesting and important insights. And, uh, the trick is to realize its limitations just like computationalism in cognitive science. Right? It’s a very useful approach, but just like any perspective, if you’re a perspectivist, you realize that is just one perspective you can take and it’s useful in certain contexts. What’s happening in both neuroscience and the life sciences is that, um, this sort of genetic paradigms and these metaphors, like the genetic program, all of that, and then program metaphors and the neuroscience has, has been taken massively out of context and used to explain away, you know, phenomena beyond their boundaries. So basically we inverted the argument, if it doesn’t fit their paradigm, um, then it’s not real. And this, you see in the literature about agency, uh, all over the place. So, um, this sort of, uh, trying to explain it away rather than to explain it, because then we can save the mechanistic approach instead of saving the phenomena and taking them seriously. And I think that’s just upside down, if you’re a true perspectivist, you’re taking those phenomena seriously and you’re not just dismissing them, um, because they don’t fit in your preconceived paradigm of how you should do science. Right.  </p>



<p>Paul    01:03:21    So one of the things that, um, Moreno and Mossio talk about, um, in their book on biological autonomy. So, so thinking about your perspective on the agent as, uh, an autonomous organization, um, with, uh, organizational closure, closure of constraints, uh, they talked about this also in their book, but they also sketch out an argument and admit that it’s an incomplete argument, uh, that they believe that the, that our brains and minds have this same kind of organizational closure autonomous from our, the rest of our organism. Right. So, you know, there are different Chinese dolls or Russian dolls of autonomy or something, right. Um, D do you buy this perspective or do you think that we need, I mean, I know that there are multiple valid perspectives, but do you buy the organizational closure of minds,  </p>



<p>Yogi    01:04:16    I’m not sure. I think it’s definitely not clearly distinct. This is, it’s a, it’s a different type of organization. You can, they have a really nice argument. I have to say in their book about what is really basic closure. So metabolic closure, basically just keeping yourself alive, you know, and then regulatory layer on top of that, which allows you to adapt. So they call this adaptive agency. It’s just one level above basic agency, basic agencies, just sort of having a metabolism that maintains itself. If you buy into the places perspective, also the boundaries of the organism, and then you have through those boundaries and tractions at the environment, and you get through regulation, you get an adaptive type of agency where the organism is able to react, to influence influences from the environment. And you can make a really convincing argument that in some ways, these are different layers of complexity in a living system.  </p>



<p>Yogi    01:05:15    So you could make a similar advice regulation, as far as I understand, they make the argument that, um, through evolution, nervous systems have, um, autonomous themselves in this way. And, uh, I do by that general argument, uh, as plausible, um, of course, it’s just, uh, it’s just a scenario at this point. It’s hard to prove that, but it’s a possible scenario, but, um, I wouldn’t call it completely. So the danger is always near the possibility means that we can distinguish different aspects of an organism without having to separate them. Right. And so this is, uh, it’s very important and is often overlooked, uh, because you don’t have to be able to separate processes to be able to distinguish them and treat them in different ways. And again, perspective is, and helps you understand that it’s a very powerful way of understanding why that is  </p>



<p>Paul    01:06:10    W one of the things that you repeatedly bring up in your course. And I have to admit, I have not read Re-Engineering Philosophy for Limited Beings yet, but, um, I have used, uh, the, the same images inspired by, you know, just, I just basically copied you, um, um, from Bill Wimsatt. Uh, and I’m specifically thinking about the one where the causal structure at our level of organization in this world and the bio psychological thicket, um, you repeatedly, you know, point to this and say, you know, you wouldn’t expect to, to be able to have a purely req reductionist, uh, explanation, and we need, this is why perspect perspectives are good, because each perspective is a cut through this, uh, causal, bio, psychological, um, thicket. And that’s a long-winded introduction to my question, which is, do you see, so what I’ve kind of been thinking about is like, how are the brain sciences brain and mind sciences in the same predicament as the biological, uh, life evolution and genetic, um, uh, sciences, or are there important differences? Because I just see so many parallels between what’s happening and what you described that’s happening in your world.  </p>



<p>Yogi    01:07:21    That predicament is definitely the same. It gets increasingly worse. Wimsatt makes some really nice arguments saying that, um, while if you have these perspectives, they have to cut the phenomena, the thicket, in, in ways that make sense. As you get to ever more complex layers in the social sciences, especially, but also cognitive science, you get bigger and bigger challenges there. Because as I said in our discussion about consciousness, we have a really big problem, and that is nobody knows what we’re talking about.  </p>



<p>Paul    01:07:53    You’re going to get in  </p>



<p>Yogi    01:07:54    Trouble. I know I just said that let the hate mail come, but I think that’s, that’s a problem. I mean, you can make some really, there are some really don’t get me wrong. There’s some really interesting arguments there, but I think it’s really hard to cut that in the right way. And I’m saying, as we slowly cut our way, um, agency has only become amenable to these kinds of questions very recently. And it’s even possible now to maybe have an empirical study of how this organization of natural agents works, but this is also very dependent on the technology we have, the kind of other knowledge we have; it’s very context dependent itself. It’s an evolution of knowledge. So I think we can, we can cut our way, uh, through the thicket. One of the basic insights from, uh, arguments about incompleteness in this area is that you will never have, uh, Kant said you will never have a Newton, uh, of a blade of grass.  </p>



<p>Yogi    01:09:01    You will never have a general theory, like in physics. And of course, Kant, there was a big cop-out there; he said, oh, you know, you should treat organisms as if they were mechanisms, even if they’re not. We call this teleomechanism. And we still have that thought, for example, in the writing of Dan Dennett, he has something called the intentional stance, right? He says, oh, organisms are mechanisms, but it makes sense to treat them as if they were not, because they behave as if they were not. And then I’m wondering why, what, what are we doing here? Why don’t we take this phenomenon of agency seriously? And maybe we learn something, instead of just trying to explain it away that way and saying, oh, we’re just pretending here that something has agency, because it makes it easier for us to think about it, which is, uh, inconsequent, it’s not consistent; for me, it doesn’t work.  </p>



<p>Yogi    01:09:47    It’s sort of, it’s something half-assed really to be quite frank. So, um, uh, I think, um, so what I really like about Wimsatt — he is really hard to read — but what I really like about him is that he takes this idea of having to have different level explanations really seriously, um, uh, makes a very convincing argument for that. So he has a chapter where he describes the uses of reductionism. And again, he has this very sophisticated stance where he says, it’s good for a lot of things. And I just don’t want to come across here saying, okay, we shouldn’t do this. I’m just saying we have to recognize its limitations. And what we’re doing right now is that we’re going way beyond these limitations. And it causes all kinds of problems, for example, our arrogant attitude towards nature as controllable and predictable, um, is one very big societal consequence of that sort of failure to recognize our limitations in this sense.  </p>



<p>Yogi    01:10:47    So this approach is very limited and it doesn’t apply to most things that truly interest us. If you think about it, agential systems are involved in all kinds of real, uh, you know, the things that really truly interest us, things that are important to our survival: ecosystems, the economy, social networks that are disintegrating, um, and so on and so forth. So I think the science of agential systems is just absolutely fundamental and we do not have it yet. I mean, complexity science as it is right now is inadequate, because the notion of agency it has is just computation, and that’s just not working.  </p>



<p>Paul    01:11:23    Even dynamical systems, um, is hard to Intuit, right? So one of the appeals of the mechanistic approach is because it’s intuitively appealing thinking about the entities and their, their parts and activities to explain the phenomenon at hand. And when you start talking about, uh, bifurcations and dynamical landscapes and trajectories, all of a sudden it gets slippery. And then, so I don’t know, is the essential perspective going to be even worse?  </p>



<p>Yogi    01:11:53    Yes. It’s going to be a lot worse than that.  </p>



<p>Paul    01:11:58    You end your course sort of it’s it’s aspirational and you, you say, oh, I’m going to have to go learn a lot more math now, you know, because it’s, uh, because it’s, it’s a real project.  </p>



<p>Yogi    01:12:11    I think one of the interesting things is to explore the limitations explicitly, so to push things. And, uh, this has, for example, not been done enough in terms of taking recursive functional programming, um, um, to its limits. These are, uh, programming approaches that allow you to operate not only on, uh, you know, the state of the system or the parameters, but on the, on the very, um, uh, the structure of the system, the operators and the system itself. So you have, so basically a program that rewrites itself, and that’s already a pretty good approximation to what an organism is, but again, it remains algorithmic. So at some point it’ll break and we’ll get stuff that happens that is not captured by this formalism. The question is where does that happen. And again, does it happen often enough for it to be relevant, uh, important for our understanding of evolution, other systems that involve agents, you know, um, cultural evolution, the economy, um, I think I, my intuition would be that this, of course plays a huge role in social systems and probably a somewhat lesser role in evolution, but I still think it plays, it clearly plays a really important role.  </p>



<p>Yogi    01:13:26    And as you say, I mean, it’s comfortable. We are used as modern people, enlightened people that go into science. We think of anything that is not mechanistic is not scientific. I mean, I want to come back to that point and that’s just not right. You know, I mean, there, there is no reason. Um, my friend, uh, philosopher Denis Walsh, he’s, he’s making a really convincing argument that certain types of teleological explanations are completely scientific and are okay, but only certain types. For example, the teleological explanation that an organism acts in a certain way, because it wants something, he has a very strong argument that this doesn’t violate any of the sort of claims against teleological explanation — that it has causation from the future, et cetera, et cetera. That’s not a problem, but of course, other people trying to convince us that evolution has an ultimate goal or something like that — that’s not legitimate. So the line cannot simply be drawn at mechanism and then say anything.  </p>



<p>Yogi    01:14:23    That’s not a mechanistic explanation. It’s not scientific, that’s taking a very narrow, um, uh, stance on what a scientific explanation is. And that’s a pretty recent thing, you know, uh, going back, uh, at the most of the scientific revolution, because views of the world very much richer in terms of what kind of question answers to the question, why you can, you can provide, has to provide them the, and I think we’ve lost something there, you know, and, and if we don’t get past that limitation and recognize it in the first place, that’s, that’s the thing. I mean, we don’t meet, we are so embedded in it that we don’t see patients anymore. It’s like, you know, that’s the fish have knowledge of the water around it. You know, it’s like, it’s like the water we’re swimming in and we’ve completely forgotten about, um, that we’ve actually constructed this approach to the world pretty recently. And it was so successful that we’ve just completely forgotten all the other stuff that be thrown out, um, um, to make it work in the first place. And it’s time to get back to that because a lot of the problems we have right now are in understanding our situation in the world and then understanding truly complex systems that have agents in them is, uh, has to do with these philosophical questions that we’ve been discussing. And of course, uh, neurosciences are completely pivoted in that,  </p>



<p>Paul    01:15:46    Uh, this cry for theory, that we have so much data and we don’t know what to do with it. And we need theory. This is prevalent in the neurosciences, in the sciences of the mind, but watching your course. And is it just a Mirage, uh, due to your scholarship that you bring in so much, uh, biological theory that it looks to be a golden age of theory in, uh, the biological sciences? Is that a Mirage because you highlight so much, so many efforts,  </p>



<p>Yogi    01:16:19    So here’s, again, the real world. It’s a beautiful picture. I think there’s lots of good theory to be produced in neuroscience and life sciences at the moment. The problem is that, um, uh, so I think a lot of what, again, the Scientifics, the system is set up that you have to sort of shout to the world. Here I am, here I am, it’s me, it’s me, it’s me all the time. And so people produce a theory that is self-serving at the moment, it’s not targeted at a deeper understanding of a phenomenon, but just that the self-promotion. And I think, uh, our fields are flooded with this type of second theory, which gives theory again about names. So we have, uh, this sort of self-serving, uh, I would call it shallow theory, a technical term used by philosopher Harry Frankfurt is bullshit. Um, that is increasingly prevalent for the reason that you have to produce it to be seen and heard at the moment.  </p>



<p>Yogi    01:17:15    And I think that’s a real problem. So we’re, we’re sort of flooded by this and this feels certainly be counterproductive. I’m, I’m gearing up to write a paper about the pernicious role of shallow theory, uh, which is, first of all, everything about theories theory gets a bad reputation, but the other creates an illusion of understanding where there is none. And I have to say that some aspects of the discussions about consciousness are of that type, but also theory in evolutionary biology are competing and missing the target. And again, as I said before, if you have concepts, you have to analyze them and see what kind of work they do. And often we introduce new concepts and so-called frameworks that, you know, work at the moment. And so I think that’s a real problem because it makes it really hard for people to recognize the important questions. Um, again, Wimsatt has beautiful, um, uh, arguments about this, how we really don’t realize often that we’re talking past each other, or just shouting past each other, uh, talks about these pseudo debates that happen, especially in that second causal, second rare. It’s really hard to see the forest for the trees. You know, that’s, that’s the nature of the game. It’s, it’s hard to do biology. It’s hard to do neuroscience. It’s definitely hard to do social science.  </p>



<p>Paul    01:18:38    Well, it’s hard to keep, keep a career as well. And that’s a motivator, I think,  </p>



<p>Yogi    01:18:43    Right? So we cut, we cut through, uh, you know, we, we just raised the ticket, completely burn it down, and that’s not a good way to go about.  </p>



<p>Paul    01:18:54    Well, speaking of theory, you were about to start talking about something I definitely wanted to talk about because one of the things that your course has done and you know, my other reading as well, I don’t want to give you all the credit of course, but, uh, is the idea that synthesis is not the goal. So, you know, growing up scientifically synthesis is always the goal you need to synthesize. Uh, and that’s how you understand that’s how you explain things. But one of the things that a perspectival approach leaves room for is, is that synthesis is not necessarily the end all goal. And in the course, you talk about the modern synthesis and the extended evolutionary synthesis and these attempts, uh, to quote unquote synthesize and, and come up with a grand unified theory of evolution. And there are a lot of people talking about grand unified theories and neuroscience and the brain, et cetera. And there’s a lot of plenty of pushback too. And I, you know, you would be among those pushing back on the idea of meeting a grand unified theory in this causal thicket. So, uh, I just wanted to cue you and ask you why is synthesis so bad? Yogi  </p>



<p>Yogi    01:20:07    Here, you have this process evolution that creates its only function. I would say it’s dangerous work is to create diversity. And we have some unifying principles like natural selection. Although we know even population geneticists know very well that a lot of evolutionary process don’t involve natural selection, but you have this sort of principle, but think about the complimentary aspect of evolution processes that create new phenotypes, sometimes entirely new levels of organization. And these are called major transitions of evolution, eukaryotic cells. And of course the evolution of consciousness in the end, we always end up can be seen as something like a new level of organization, a new degree of freedom, as we said before, I think that makes a lot of sense. So you wonder how, how can you have a general account of those processes that create, uh, what is actually being selected? And I don’t think we can because they are post-its of this nature that is constantly re-inventing remaking itself.  </p>



<p>Yogi    01:21:12    So they, whenever you have these, think you have the final theory, it’ll surprise you again. So for that reason it’s bizarre. So the modern synthesis, it has been argued very convincingly by different people. My friend and colleague Arlin Stoltzfus, among others, and Ron Amundson, a philosopher of science, that the synthesis was really more of a restriction. It was a form of scientific gate keeping, which was very important at the time to define a field that was new at the time. And it excluded more than it, it synthesized, um, to be honest, because all these aspects, these constructive aspects were excluded and uh, this movement, the extended synthesis is, is rightly claiming. So they’re, they’re going in the right direction there that they’re claiming these sort of neglected aspects of these constructive aspects of evolution as evolutionary biology, the mistake is or where there’s just thought it’s just a slogan.  </p>



<p>Yogi    01:22:13    Really it’s the synthesis part because there is no synthesis. The striking thing about this extended synthesis is it’s just a bunch of disconnected, um, uh, phenomena that niche construction, uh, phenotypic plasticity are thrown together. You see these diagrams and slicer where these concepts are somehow, you know, uh, put in circles around each other, but that makes no sense. It’s just a bunch of things that are partially already part of traditional evolutionary theory. If you go back to Darwin, niche construction was in Darwin’s books; he knew that earthworms make the soil that is their habitat. He has an illustration on that in a book. And so it’s like, and then it’s sort of vague, right? I mean sort of throwing around, um, the concepts, uh, but if you do theory, the theory needs to do work, you know, so I’m not saying there are lots of excellent people in this movement that do excellent empirical and theoretical work on their specific problems that they’re working on.  </p>



<p>Yogi    01:23:14    I’m saying the framework and this general idea of that there is a well definable modern synthesis that needs to be extended is, um, well, can I call it bullshit? And I think it’s just a tool, a political tool, gatekeeping tool, a tribal tool, and people are having, this is a fierce debate between if you’re between those camps, if you’re neither on the anti side or on the pro. Yes, I do feel it. I can tell you, especially if you’re a young I’m tenured researcher and it’s a pseudo debate that brings the field nowhere. And that’s sitting in that track between those two for a while now, and it’s not pleasant, but it’s very important work to do, to say, look, I mean, this is a soccer game where I want both teams to lose, you know, uh, so it’s, it’s like, I don’t think this is a productive debate because I don’t think that the conceptual frameworks that are presented are anything else, but, um, tools for politics, uh, academic politics, theoretical tools for insight.  </p>



<p>Yogi    01:24:20    And, um, I think we need to talk about this, but, um, it’s very difficult because you immediately get shouted at, and, and that’s not a healthy sort of, um, way, you know, I’m, I’m a bit provocative in this, of course I don’t mind getting shouted at, but I do think that we need a productive discussion where people try to understand the actual problems. And I think the underlying problem is exactly that the synthetic approach is completely wrong in a, in an area where you’re looking at a thicket causal thicket plus the thicket that is creating constant novelty, um, estimate you’re of the game. You just need a perspectival approach. And this has been recognized by a few people, of course. I mean, again, the go-to for that. Um, and he actually did his PhD as a philosopher in the lab. Uh, Richard Lewontin, very good evolutionary biologist who often named as one of the core modern synthesis proponents, who is one of the best thinkers about, uh, dialectics in evolution.  </p>



<p>Yogi    01:25:20    And it’s often maligned because he was a Marxist. And so it’s not like his theory is communist. It’s actually dialectics, but I like his work, but people distanced themselves because of that. I think these kind of ideologies that intrude at the moment are a sign of a high pressure environment or the level of funding, lots of infighting more than they are of a productive, theoretical discussion. That’s basically summing it up. And I think it’s time to move beyond synthesis because a historian of science has made a really convincing argument that synthesis it’s a sort of a positivist remnant. It’s a remnant of an old philosophy of science. So everything is sort of the aim of science is producing large scale theories from which we deduce everything else. And that’s just not going to work in modern biology because we’re dealing with a novel degenerating processes, evolution and cultural evolution economy. That’s the same. So these fields need a completely different perspective Bible approach because we’re reviewing the truly complex systems.  </p>



<p>Paul    01:26:34    Then in the last few moments here, I just want to ask you zoom out and ask you some kind of career questions. If, if you’ll indulge me. Um, one, I, I, you know, your path has been unique as everyone, everyone says that their own trajectory is unique, but you know, I look at yours and it truly is if you had to go back, if you, if you thought back, would you do anything differently? Um, would you tell yourself anything or try to convince your younger self to do anything differently?  </p>



<p>Yogi    01:27:04    Yes. I mean, the one thing I wouldn’t do is sort of worry about things that are five years in the future, because anything that I worried about five years in the future never happened. And I was always somewhere completely different than I had in those five years. So this is what would be advice to young scientists. The other thing is, is if you go into science right now, I think I’ve managed to do this to a certain degree, but not enough is really do when you care about you’re passionate about. And I often managed to do this. There were a few occasions where, um, I mean there, there’s sort of two things. One is I wouldn’t work on certain topics anymore just because I think they, they, these happen very early in my career where I, I, I just thought, okay, this is going to be good for my career.  </p>



<p>Yogi    01:27:53    And I wouldn’t do that anymore, but I didn’t do that. I pretty early on decided not to do that. The second thing is don’t, don’t work for people who have the wrong attitudes towards doing science and are, uh, sort of bad, uh, instructors, mentors, um, sometimes just sort of have a hidden agenda or something or playing games, academic games, um, and that’s becoming more and more pervasive. Uh, this is this academic politics is a game that I’ve decided not to play. And this is also why I’m out of, uh, academic traditional academic career path. I don’t even want to get back into that even if I would still get a job at an academic institution. That’s my life’s too short.  </p>



<p>Paul    01:28:38    What if you could be the head of a department though, even worse? Wouldn’t it?  </p>



<p>Yogi    01:28:43    No, no, no, no. The head of an Institute, it’s this beautiful as that work was, it was very good. It was also for a limited amount of time. Fortunately, I have to say it wouldn’t have been good for me. I am an Explorer. I want to do academic research and I’ve read this in the times higher education supplement. There was this article that was entitled. If you like research academia may not be for you because it was a survey of such scientists. And they said on average, they spend 11% of the time on research. The article basically said, if you do see our ethical research, why not get a 50% job somewhere? So I’m trying to get a business model to work where I teach courses. I do retreats about the academic system for young scientists. And I I’m trying to start something called mentoring for people, this sort of dimension of learning that we often neglect personal growth in various directions, um, that is never assessed in those metrics that we have. It’s just factual knowledge and your usefulness to sit in an economic system that is not right. So it’s sort of, I’m trying to earn money with that sort of a community on a freelance basis, and then use the rest of the time for research instead of going through that, um, head of department, head of department. So has always, I worked under so many head of departments and it’s been a nightmare. I saw my future in that it was like, oh my God, no, I don’t want to do that. No, thank you.  </p>



<p>Paul    01:30:15    Yeah. Hopefully the heads of departments aren’t listening right now, um,  </p>



<p>Yogi    01:30:19    I admired them and they picked to them, a lot of them do a really good job, but I’m not cut out to do this. That’s not a judgment. I’m not saying they’re all bad or whatever. It’s just, no, I don’t want to do that.  </p>



<p>Paul    01:30:31    Don’t worry. I’ll edit out the admiration comments. So my last question, so one of the, one of the reasons that I got out of academia is because one of the reasons is that I felt like I was becoming more and more specialized and my skills were becoming deeper. Of course. Uh, and I was learning a lot more in that respect, but I was, you know, losing the forest for the trees. For instance, there is important work that was just adjacent to what I was doing that I wasn’t even really aware of, or didn’t even appreciate because I was so focused on what I was doing. And it seems like your, um, path has been of the opposite direction where you either have fought to maintain a broad picture and think about what’s important or spent extra time doing that, or somehow magically, because I know you’ve done, you’ve done a lot of hard deep work, uh, modeling, uh, these systems that we’ve talked a little bit about today. Um, but has that been, um, can you paint that picture for me? Has that been a struggle because it’s, it’s kind of going against the grain. You know,  </p>



<p>Yogi    01:31:38    I arrived at this point after 20 years of studying these genes and other flies called the gap genes. I never, I never want to hear  </p>



<p>Paul    01:31:51    Sure, but your, but your favorite organism is Drosophila. Melanogaster right.  </p>



<p>Yogi    01:31:57    They were really, I mean, this was a very rewarding work. So I just decided, I knew my strengths was in theory very early on in my career, but I decided to work in the lab and then became a group leader exactly. For the reason that I wanted to have an empirical contribution and not just sit around and have theories about other people’s work. I think that that’s been paying off really well, always with the salt on the background. So I was privileged to do this, this crazy master’s a master’s degree in holistic science. It was amazingly a path changing for me and life changing because it focused me. I mean, I was busy. I went to Brian because I had read his book, How the Leopard Changed Its Spots, at the time as a student. And at those times at that time, they were trying to see influential and a bunch of other people who worked in the complexity sciences and these kinds of people that I really wanted to keep that in mind.  </p>



<p>Yogi    01:33:03    And, and then I got that priming there during that one year masters course, uh, of really thinking about, uh, phenomenology master process thinking and all that really started. And I think I never, I never lost it. I keep it through also a very decade long collaboration with my friend. Who’s a biomass process thinker, and we always kept on doing that and published these papers and relatively obscure journals. But when people ask me, what are the most important papers that you’ve written my often put those papers first because they were intellectually the most, uh, sort of the guiding, you know, we did some really nice empirical stuff from the people who were involved in doing that work. I cannot tell you how great they were and how rewarding it was to work with such people. Um, but in the end, the big picture stuff, uh, is always what I wanted to focus on. It’s very difficult to do that. Um, so you have to do your evening work, um, during your normal scientific career, you have to compromise to survive if you want to survive, or then you have to decide that you, uh, will do research outside the traditional academic system, which is something that we’ve become increasingly. We have to find ways to do that still, but it will become increasingly the way to go for conceptually innovation because the academic system, I’m sorry, is not cutting it anymore.  </p>



<p>Paul    01:34:28    That’s quite a place to end it. All right, Yogi. So, uh, I, I was excited. I’ve been excited to have this conversation with you for a long time because I, I love the course and I’m so glad that you, uh, agreed to come on and talk with me. It’s been a real joy. So, uh, I’m, I’m really glad to introduce you to my podcast audience out there. You  </p>



<p>Yogi    01:34:49    It’s been absolutely fantastic to talk to you. Thank you.  </p>

</div></div>


<p>0:00 – Intro<br />4:10 – Yogi’s background<br />11:00 – Beyond Networks – limits of dynamical systems models<br />16:53 – Kevin Mitchell question<br />20:12 – Process metaphysics<br />26:13 – Agency in evolution<br />40:37 – Agent-environment interaction, open-endedness<br />45:30 – AI and agency<br />55:40 – Life and intelligence<br />59:08 – Deep learning and neuroscience<br />1:03:21 – Mental autonomy<br />1:06:10 – William Wimsatt’s biopsychological thicket<br />1:11:23 – Limitations of mechanistic dynamic explanation<br />1:18:53 – Synthesis versus multi-perspectivism<br />1:30:31 – Specialization versus generalization</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/5beb3e83-7d5c-4917-ba7b-1609ab02c26f-118-Johannes-Jaeger-public.mp3" length="92592437"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Johannes (Yogi) is a freelance philosopher, researcher & educator. We discuss many of the topics in his online course, Beyond Networks: The Evolution of Living Systems. The course is focused on the role of agency in evolution, but it covers a vast range of topics: process vs. substance metaphysics, causality, mechanistic dynamic explanation, teleology, the important role of development mediating genotypes, phenotypes, and evolution, what makes biological organisms unique, the history of evolutionary theory, scientific perspectivism, and a view toward the necessity of including agency in evolutionary theory. I highly recommend taking his course. We also discuss the role of agency in artificial intelligence, how neuroscience and evolutionary theory are undergoing parallel re-evaluations, and Yogi answers a guest question from Kevin Mitchell.



Yogi’s website and blog: Untethered in the Platonic Realm.Twitter: @yoginho.His youtube course: Beyond Networks: The Evolution of Living Systems.Kevin Mitchell’s previous episode: BI 111 Kevin Mitchell and Erik Hoel: Agency, Emergence, Consciousness.


Transcript

Yogi    00:00:03    They’re presented as an explanation of what’s going on, why other than they don’t really explain anything right. That, that was one of my problems. Um, they just showed that the system is complicated. Basically. I wouldn’t even call it complex. And so, um, I became frustrated with this is you’re really a process thinker, and I think that’s really important here. You need to get, let go of those, those fixed structures. I mean, we, we can only study small aspects of development and evolution using dynamical systems theory, but we cannot capture the agency of the organism so successful that we’ve just completely forgotten all the other stuff that we’ve thrown out to make it work in the first place. And it’s time to get back to that because a lot of the problems we have right now are in understanding our situation in the world and then understanding truly complex systems that have agents in them. And of course, neurosciences are completely included in that  



Speaker 0    00:01:09    This is brain inspired.  



Paul    00:01:22    Hello, it’s Paul. On the episode today, I have a chat with Yohanis Yeager who also goes by Yogi, which is what I call him during the episode on his website, Yogi bills himself as a freelance philosopher, a researcher and an educator. And he’s actually done a lot of empirical research in systems science and evolutionary biology and a range of interdisciplinary topics as well. The reason he’s on the podcast is because I recently took his online YouTube course called beyond networks, the evolution of living systems. So the course covers a lot of ground, uh, but it’s roughly about how, because of the complexity...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:36:08</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 117 Anil Seth: Being You]]>
                </title>
                <pubDate>Tue, 19 Oct 2021 18:03:12 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-117-anil-seth-being-you</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-117-anil-seth-being-you</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/10/art-117-01.jpg" alt="" class="wp-image-1456" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/10/Anil-head.jpg" alt="" class="wp-image-1457" width="200" height="200" /></div>



<p>Anil and I discuss a range of topics from his book, <a href="https://www.penguinrandomhouse.com/books/566315/being-you-by-anil-seth/#">BEING YOU A New Science of Consciousness</a>. Anil lays out his framework for explaining consciousness, which is embedded in what he calls the “real problem” of consciousness. You know the “hard problem”, which was David Chalmers term for our eternal difficulties to explain why we have subjective awareness at all instead of being unfeeling, unexperiencing machine-like organisms. Anil’s “real problem” aims to explain, predict, and control the phenomenal properties of consciousness, and his hope is that, by doing so, the hard problem of consciousness will dissolve much like the mystery of explaining life dissolved with lots of good science.<br /></p>



<div class="wp-block-image"><a href="https://www.penguinrandomhouse.com/books/566315/being-you-by-anil-seth/#"><img src="https://braininspired.co/wp-content/uploads/2021/10/Being-You_jacket-679x1024.jpg" alt="" class="wp-image-1458" width="170" height="256" /></a></div>



<p>Anil’s account of perceptual consciousness, like seeing red, is that it’s rooted in predicting our incoming sensory data. His account of our sense of self,  is that it’s rooted in predicting our bodily states to control them.<br /></p>



<p>We talk about that and a lot of other topics from the book, like consciousness as “controlled hallucinations”, free will, psychedelics, complexity and emergence, and the relation between life, intelligence, and consciousness. Plus, Anil answers a handful of questions from Megan Peters and Steve Fleming, both previous brain inspired guests.</p>



<ul><li><a href="https://www.anilseth.com/bio/">Anil’s website</a>.</li><li>Twitter: <a href="https://twitter.com/anilkseth">@anilkseth</a>.</li><li>Anil’s book: <a href="https://www.penguinrandomhouse.com/books/566315/being-you-by-anil-seth/#">BEING YOU A New Science of Consciousness</a>.</li><li>Megan’s previous episode:<ul><li><a href="https://braininspired.co/podcast/73/">BI 073 Megan Peters: Consciousness and Metacognition</a></li></ul></li><li>Steve’s previous episodes<ul><li><a href="https://braininspired.co/podcast/99/">BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness</a></li><li><a href="https://braininspired.co/podcast/107/">BI 107 Steve Fleming: Know Thyself</a></li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Thanks for being here. A huge congratulations on the book. I know I sent this to you in email, but, uh, it was very refreshing, refreshing, really clear, and, uh, the writing style was just easy and fun to read. So nice job.  </p>



<p>Anil    00:03:49    Thanks for that. That means a lot. And thanks for having me.  </p>



<p>Paul    00:03:52    We’re going to, I don’t want to bury the lead here, but I want to ask a couple of questions just about writing the book and the book itself before we get into its contents. Um, so on a recent episode I had, uh, Steve Grossberg on and by the way, I have you, um, heard his or read his, uh, his recent tome or any of it conscious mind resonant brain. I have,  </p>



<p>Anil    00:04:15    I haven’t yet. I have ordered it, uh, but I have not yet read it. Okay.  </p>



<p>Paul    00:04:19...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Anil and I discuss a range of topics from his book, BEING YOU A New Science of Consciousness. Anil lays out his framework for explaining consciousness, which is embedded in what he calls the “real problem” of consciousness. You know the “hard problem”, which was David Chalmers term for our eternal difficulties to explain why we have subjective awareness at all instead of being unfeeling, unexperiencing machine-like organisms. Anil’s “real problem” aims to explain, predict, and control the phenomenal properties of consciousness, and his hope is that, by doing so, the hard problem of consciousness will dissolve much like the mystery of explaining life dissolved with lots of good science.







Anil’s account of perceptual consciousness, like seeing red, is that it’s rooted in predicting our incoming sensory data. His account of our sense of self,  is that it’s rooted in predicting our bodily states to control them.



We talk about that and a lot of other topics from the book, like consciousness as “controlled hallucinations”, free will, psychedelics, complexity and emergence, and the relation between life, intelligence, and consciousness. Plus, Anil answers a handful of questions from Megan Peters and Steve Fleming, both previous brain inspired guests.



Anil’s website.Twitter: @anilkseth.Anil’s book: BEING YOU A New Science of Consciousness.Megan’s previous episode:BI 073 Megan Peters: Consciousness and MetacognitionSteve’s previous episodesBI 099 Hakwan Lau and Steve Fleming: Neuro-AI ConsciousnessBI 107 Steve Fleming: Know Thyself


Transcript

Thanks for being here. A huge congratulations on the book. I know I sent this to you in email, but, uh, it was very refreshing, refreshing, really clear, and, uh, the writing style was just easy and fun to read. So nice job.  



Anil    00:03:49    Thanks for that. That means a lot. And thanks for having me.  



Paul    00:03:52    We’re going to, I don’t want to bury the lead here, but I want to ask a couple of questions just about writing the book and the book itself before we get into its contents. Um, so on a recent episode I had, uh, Steve Grossberg on and by the way, I have you, um, heard his or read his, uh, his recent tome or any of it conscious mind resonant brain. I have,  



Anil    00:04:15    I haven’t yet. I have ordered it, uh, but I have not yet read it. Okay.  



Paul    00:04:19...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 117 Anil Seth: Being You]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/10/art-117-01.jpg" alt="" class="wp-image-1456" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/10/Anil-head.jpg" alt="" class="wp-image-1457" width="200" height="200" /></div>



<p>Anil and I discuss a range of topics from his book, <a href="https://www.penguinrandomhouse.com/books/566315/being-you-by-anil-seth/#">BEING YOU A New Science of Consciousness</a>. Anil lays out his framework for explaining consciousness, which is embedded in what he calls the “real problem” of consciousness. You know the “hard problem”, which was David Chalmers term for our eternal difficulties to explain why we have subjective awareness at all instead of being unfeeling, unexperiencing machine-like organisms. Anil’s “real problem” aims to explain, predict, and control the phenomenal properties of consciousness, and his hope is that, by doing so, the hard problem of consciousness will dissolve much like the mystery of explaining life dissolved with lots of good science.<br /></p>



<div class="wp-block-image"><a href="https://www.penguinrandomhouse.com/books/566315/being-you-by-anil-seth/#"><img src="https://braininspired.co/wp-content/uploads/2021/10/Being-You_jacket-679x1024.jpg" alt="" class="wp-image-1458" width="170" height="256" /></a></div>



<p>Anil’s account of perceptual consciousness, like seeing red, is that it’s rooted in predicting our incoming sensory data. His account of our sense of self,  is that it’s rooted in predicting our bodily states to control them.<br /></p>



<p>We talk about that and a lot of other topics from the book, like consciousness as “controlled hallucinations”, free will, psychedelics, complexity and emergence, and the relation between life, intelligence, and consciousness. Plus, Anil answers a handful of questions from Megan Peters and Steve Fleming, both previous brain inspired guests.</p>



<ul><li><a href="https://www.anilseth.com/bio/">Anil’s website</a>.</li><li>Twitter: <a href="https://twitter.com/anilkseth">@anilkseth</a>.</li><li>Anil’s book: <a href="https://www.penguinrandomhouse.com/books/566315/being-you-by-anil-seth/#">BEING YOU A New Science of Consciousness</a>.</li><li>Megan’s previous episode:<ul><li><a href="https://braininspired.co/podcast/73/">BI 073 Megan Peters: Consciousness and Metacognition</a></li></ul></li><li>Steve’s previous episodes<ul><li><a href="https://braininspired.co/podcast/99/">BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness</a></li><li><a href="https://braininspired.co/podcast/107/">BI 107 Steve Fleming: Know Thyself</a></li></ul></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Thanks for being here. A huge congratulations on the book. I know I sent this to you in email, but, uh, it was very refreshing, refreshing, really clear, and, uh, the writing style was just easy and fun to read. So nice job.  </p>



<p>Anil    00:03:49    Thanks for that. That means a lot. And thanks for having me.  </p>



<p>Paul    00:03:52    We’re going to, I don’t want to bury the lead here, but I want to ask a couple of questions just about writing the book and the book itself before we get into its contents. Um, so on a recent episode I had, uh, Steve Grossberg on and by the way, I have you, um, heard his or read his, uh, his recent tome or any of it conscious mind resonant brain. I have,  </p>



<p>Anil    00:04:15    I haven’t yet. I have ordered it, uh, but I have not yet read it. Okay.  </p>



<p>Paul    00:04:19    Well, it is, it is a massive, massive book and it’s just kind of a collection of his work and it took him like 30 years to write. Uh, and, um, this is the same, uh, like Mark, I had Mark Bickhard on and he’s not done writing his 30, 40 year book. Uh, but you’re like 25 years old. Right. So yours couldn’t have taken that long to write. How long, how long have you been working on this?  </p>



<p>Anil    00:04:40    Hold on, did you say I’m 25 years old? I wish I was 25 years old. I’ve been, I guess, in the business for about 25 years. So it might be my, uh, you know, I started out in the mid nineties. Um, it was about five years ago that I signed an agreement to write the book. And then of course I think like many people I just said, wow, that’s great. Now I’ll put that away. And, uh, and forget about it for a bit. And I think I started seriously writing it, uh, about three and a half years ago. I think it took about three years all in all to write.  </p>



<p>Paul    00:05:18    Have you always been such a clear writer?  </p>



<p>Anil    00:05:21    Of course not. No, it’s it’s, I think it’s, it’s really a skill that’s to be learned. And of course with this book, what clarity there is, is also great credit to the editors that I had to, and the contribution that a good editor can make to a book is just really hard to overestimate. And then they can do amazing things with otherwise garbled and completely incoherent text.  </p>



<p>Paul    00:05:45    Yeah. I was harping on this fact, uh, many scientists and researchers don’t seem to use editors and the work suffers because of it. So, uh, I just appreciate that in general. So I told you, I have, uh, I have a handful of questions, uh, from some folks that, you know, and some that you don’t, uh, and I thought I’d start off by playing one of those questions here, as it pertains to communicating, uh, consciousness. Right? It does. It seems like there was a slew of consciousness books right now. It’s like the golden era of consciousness books. Um, do you agree with that?  </p>



<p>Anil    00:06:21    I don’t dunno. There always seems to be a slew of consciousness book, so that’s one of the, one of the beautiful, but also slightly challenging things about the area everybody’s interested, but there’s a lot of stuff out there too. A lot of competition.  </p>



<p>Paul    00:06:32    Maybe, maybe it’s my bias since I do a podcast. Okay. So I’m going to play this for you, and then I’ll let you answer here.  </p>



<p>Megan Peters    00:06:40    Hi, and Neil and Paul. This is Megan Peters at UC Irvine. Thanks so much for giving me the chance to ask some questions. What do you see are the greatest challenges in public outreach about consciousness science right now? And relatedly, what do you think are some of the most promising approaches to solving those challenges?  </p>



<p>Anil    00:07:02    Hello, Megan, how nice to hear from you. And, um, yeah, I, well, that’s a, it’s a good question. Of course. Um, Megan is a, a colleague and friend of mine. Who’s doing brilliant work in metacognition and consciousness. And I think she’s been on your podcast too. I think I remember hearing it. It was a great episode. The main challenge from my perspective is I think there are two challenges. The first is probably common to a lot of public communication of science in general, which is how do you express things, accessibly, and clearly without oversimplifying, that’s just a hard balance to find, but it’s a possible balance to find. And the approach to that is just continually trying to refine the way you put things, the examples that you might use, the metaphors that you might use, I’m always worried that I’ve oversimplified, that I’ve misrepresented others’ opinions or misrepresented the literature in general.  </p>



<p>Anil    00:08:03    It’s hard to get away from that worry. Uh, but you can’t cover everything. You can’t caveat absolutely everything either. You have to have the message you want to convey, but make sure that the evidence, the arguments that you’re resting, that message on they stand up. I think that’s one general challenge for public communication of science. The other one for consciousness is an amazing thing about consciousness working on consciousness is really that it’s not hard to get people motivated by the topic itself. People will just come to the table with a strong a priori interest in consciousness usually,  </p>



<p>Paul    00:08:44    And an a priori view on it. Right.  </p>



<p>Anil    00:08:46    Well, that’s right. Yeah. They also come with very strong opinions about certain things about consciousness, what it is, what the definition is, what something like freewill might mean. And sure, we’ll talk about that too. So here, the challenge is how to actually engage with preexisting views rather than just try to dump my own views into somebody else’s mind, how to turn it from a lecture into a dialogue.  </p>



<p>Paul    00:09:13    Hmm. All right. Very good. Well, I don’t want to bury the lead anymore. So I’m going to start actually, by reading a quote from your book that is kind of the crux of the, of your message, and then I’ll let you unpack it and, and, and, you know, kind of give an overview of, of your views. Uh, before we move on, like I said, I have, um, I have a host of my own questions of course, way more than we’ll get to, but then, uh, I got a lot of, um, questions from, from people like Megan. So it will be hearing from her again. So I want to make sure that we get to those. Okay. So, uh, this is from the book. This is about two thirds of the way through. Maybe this for me is the true ground state of conscious selfhood, a formless shapeless control, oriented perceptual prediction about the present and future physiological condition of the body itself. This is where being you begins. And it is here that we find the most profound connections between life and mind between our beast machine nature and our conscious self. So, uh, I don’t know if you feel that that kind of EnCap encapsulates the whole message, but there’s a lot to unpack there as well. So I’m going to let you have the floor  </p>



<p>Anil    00:10:23    Well, great. There is a lot to unpack there because that’s in a way, that’s the culminating statement for the book’s main argument. So I don’t want to try and explain the whole book in, in, in unpacking that, that summary, but I think what’s important about it is, is the first thing I say about it is this wasn’t really the idea that I set out with it at the beginning, this idea that this ground state of self is, is in a predictive control oriented perception about the physiological condition of the body and its trajectory. That’s something that, that came as I followed the thread of ideas over the years. And I think that’s one of the rewarding things also about writing the book. It wasn’t that it was just putting down all the ideas already had. The writing of the book was an extremely for me by turns frustrating and challenging, but also rewarding way of putting all the different threads and weaving them together into something that was new to me as well, by the time I’d finished writing it.  </p>



<p>Anil    00:11:26    And this is particularly true in this connection between life and consciousness. So the idea here is that I started to think many, many years ago about perception as this form of prediction about the causes of sensory signals. This is not a new idea. This goes back to Hermann von Helmholtz in the 19th century. People talk about it in terms of predictive coding, predictive processing, active inference, all these related ideas that see perception as an inside out top down construction, rather than an outside in bottom up reading out of the world around us. The key part of that story for me was that the contents of what we perceive the nature of it, the, the character of the different perceptual experiences that we have should relate to the kinds of predictions that the brain is making about the sensory signals, so visual experiences have a particular character because the brain is making predictions about how visual signals relate to behavior.  </p>



<p>Anil    00:12:33    What happens when we move our eyes, for instance, these sorts of things, but the brain is also dealing with perception and control of the body. And the body is as remote to the brain as the outside world in, in, in the fundamental sense that it has no direct access to what’s going on in the body. It still has to infer what’s going on in the body on the basis of noisy and ambiguous sensory signals. So there’s this process or the argument is there’s this process of predictive perception about the interior of the body, but experiences of the interior of the body. They aren’t like visual experiences. They don’t have colors and shapes and locations in space. They have valence, things are good or bad or likely to be good or bad in the future. And so the idea to connect the two kinds of experiences is that, well, the predictions that are involved in perceiving the interior of the body have a different function, instead of figuring out where things are to a first approximation, they’re about controlling and regulating things.  </p>



<p>Anil    00:13:38    That’s why we have brains in the first place to keep the body alive. And so if a prediction is being used for control, and there’s a whole literature from cybernetics and control engineering, and now to free energy principle and active inference that tells us that to control something, you need to be able to have a good predictive model of it. Well, then the character of the resulting experience can be understood as, as emerging from a relating to that control oriented function. So this is a good for me handle on why self related experiences feel differently from let’s say visual experiences of the outside world, and it underwrites this close link between life and mind, because now these very basic experiences of just being a living organism. And here’s where I do talk a little bit vaguely about the phenomenology, the experiential character of this. What, what really is the base level of being a self it’s not the identity that you have with the name and the set of memories.  </p>



<p>Anil    00:14:43    It’s not really even the experience of what objects in the world is my body. There is, I think for me, this very deep lying sense of just being a living organism and that, and this is the proposal. It’s not something that I can justify on the basis of clear experiments with data that, that, that experience, that basal experience emerges from the, the role of the brain’s predictive mechanisms in regulating the body, and then everything else flows from that. So all the perceptual mechanisms that are, are, and now dealing with the outside world or with the body as an object in the world, all have that evolutionary developmental and kind of moment to moment functions being grounded in this basic imperative to stay alive. I mean, this is not a completely new idea either, right? I mean, it’s got lots of resonances with people like Antonio Damasio, uh, with Mark Solms with Lisa Barrett, um, with Evan Thompson, there’s lots of rich territory, rich literature exploring these life, mind connections, but my way of doing it is to emphasize predictive perception and predictive processing as the common thread.  </p>



<p>Paul    00:15:57    Yeah. So the, um, I mean, there’s a lot of different ways to go here. One of the things that, you know, so you talk about, uh, the possibility you end up talking in the book about the possibility of AI having consciousness, and maybe we’ll come onto that later. But one of the things that I have as my own thinking about consciousness has very minimally developed over the years. It seems because I have, I’ve not thought nearly as deeply about these sorts of things is that I do appreciate the richness of the, you know, the feeling of identity of having a self that you’re saying, you know, and the narrative self and personal identity self, uh, there’s a certain richness there that you’re saying doesn’t need to be accompanied in the, um, uh, predicting our life control setting processes to stay alive. And so then that, of course, uh, makes you wonder about all the different animals and organisms and their level of, you know, I suppose, minimally, they would need to have a predictive mechanism to, you would say that, um, the conscious experience is somehow situated in that predictive, uh, cognitive mechanism. Uh, but, and, and you write about, of course you write about animals in the book, but, um, maybe you can just describe what it means you think for the, the experiences of other animals, uh, and then we’ll come onto the experiences of other people as well.  </p>



<p>Anil    00:17:20    Yeah, I think that that’s a very rich topic. It’s very important topic as well, because one of the main implications of a, of a well-grounded science of consciousness is to make informed judgments about the potential for suffering and the potential space of experiences of non-human animals. And there isn’t inevitable tension here because so far we just lack a consensus view on the sufficient mechanisms for consciousness. We, there are competing theories, we have different ideas. Um, so at the moment inferences about other animals is still using humans or mammals as a benchmark. You know, we, we take what we know from humans. We extend that to other animals, which, um, will mammals for instance, have pretty much the shared neural mechanisms that we know are important in humans. And then how far out can we go? This is a strategy that’s hard to get around, but of course there’s also the recognition that the way we are conscious, the way we experience having a self is not the only way it seems to be all bound together.  </p>



<p>Anil    00:18:27    So this is the thing at the experience of being a human self is that we have all of these different aspects of it, a name and identity over time experiences of agency, of free will of seeing the world from a first person perspective of having a body of being a body of being the seat of emotions. All these things seem unified, but of course they aren’t. Neurology and psychiatry tells us that they aren’t and various experiments tell us that they aren’t as well. And if they aren’t necessarily unified, then there are different ways they might come together in different people, but also in different species. So the space of other minds is, is very large. And what can we say about that space? Well, we can’t have the experiences of another species or indeed of another person. And this is a very old point in philosophy, Thomas Nagel, what is it like to be a bat?  </p>



<p>Anil    00:19:17    But that doesn’t mean we can’t understand from a third person perspective, something about what those experiences might be like, if I can characterize, for instance, what the differences between a visual experience and an emotional experience in terms of different kinds of predictions, then that provides a language for thinking about other kinds of experience to how they may relate to the experiences we’re familiar with, even though we can’t instantiate those experiences ourselves. The harder question to answer is how far does the magic circle extend? You know, when does sentience grade into nothingness in the animal kingdom? That’s, that’s really, really difficult. I don’t think there’s a sensible way to answer that there is something about level of complexity of the nervous system that, that seems to me has to be important. It seems unlikely to me that C. elegans, this tiny worm with, um, 302 neurons is, is conscious, but maybe that’s just a species specific bias on my part. I mean, we know that number of neurons per se, doesn’t matter, the cerebellum has three quarters of the neurons in the brain, and doesn’t seem much involved in consciousness if at all, to humans. So all these, these intuitions that we have, have to be very careful about the extent to which they’re based on a sense of anthropocentrism of human exceptionalism. Uh, and that’s the tension at the heart of thinking about animal consciousness. For me,  </p>



<p>Paul    00:20:47    Poor poor C. elegans always, always gets shafted with regard to, uh, admitting consciousness in C. elegans. All right. So, uh, I’m in danger of just going down the rabbit hole on my own question. So here’s what we’re going to do. We’re going to, we’re going to go through the rest of these questions and because you already mentioned some things that related to some of the questions, and we can just use that as a jumping off point to talk about, you know, things that you’ve, that you write about in the book. So, uh, here’s Megan’s next question.  </p>



<p>Megan Peters    00:21:15    Uh, Anil, you’ve described the real problem of consciousness as being separate from the hard problem of consciousness. So related to this idea, do you believe that discovering and characterizing something like a quality space, like Rosenthal’s quality space and similar kinds of, uh, writings and efforts by Hakwan Lau and Nao Tsuchiya and others, do you think this is truly going to be enough? Do you think that this is going to make the hard problem truly just disappear. If we reach a relatively full description of this quality space and relatedly, um, what do you see as the biggest challenges in making the leap from a causal description, like a quality space, a full quality space description to a true explanation of consciousness. Do you think that this distinction is also going to disappear as we get closer to such a full description?  </p>



<p>Paul    00:22:12    I told you Megan really went after it.  </p>



<p>Anil    00:22:14    Yeah, I know. And I like the way she, her questions always have a and relatedly halfway through that’s when I start to worry. So, but it is a very good question. Of course. Thank you again, Megan. Um, yeah, I talk about the real problems quite informally, really, because it’s very related to other approaches that I’m sure Megan knows, but, but more generally for, for the listeners, these are approaches like neuro phenomenology that traces back to Francisco Varela. Uh, this general idea of, instead of trying to explain how it comes about that any physical system could be identical to, or give rise to a conscious experience. This is the hard problem of consciousness broadly speaking from, from David Chalmers, how consciousness happens to be part of this physicalist, materialist picture of the universe, um, or what’s the relationship between conscious experiences and stuff in the universe.  </p>



<p>Anil    00:23:13    And the real problem is saying, well, let’s not address that directly. Let’s go after it indirectly. Let’s try to explain, predict and control the properties of conscious experiences in terms of things happening inside brains and bodies. So these quality spaces that Megan mentioned, this is one aspect of doing that. It’s a way of trying to organize different kinds of experiences, according to metrics, you know, how similar they are, how different they are, how they relate to each other. And you can think of that as, as very related to this idea where you were talking about a bit earlier, that different kinds of predictions can go along with different kinds of experiences. It’s trying to, they’re both different ways of talking about how you organize a space of experiences and relate it to mechanisms. Um, this, by the way is not the same as David Chalmers’ easy problems, which are questions about how the brain works when you just basically take consciousness entirely out of the picture and just talk about function behavior and so on.  </p>



<p>Anil    00:24:15    So I do think it’s a useful middle ground. It’s a pragmatic way to approach the science of consciousness. The question is, will it be enough? And this is a tricky question to answer from where we are now with the tools and with the concepts that we have now, I think very hubristic to say it definitely will be enough, but I also think it will be disappointingly or an unwarranted, really pessimistic to say it definitely won’t be enough. I think there is a case that can be made for a healthy agnostic optimism about it. And the reason I say that is because the history of science just gives us plenty of examples, where things that have seemed mysterious no longer seem that mysterious because of insights that we have, uh, because of a sort of real problem like approach. The classic example of course, is the study of life that we, instead of looking for one Eureka solution, a spark of life, an élan vital, biologists characterized the different properties of life and explained them as a related set of problems.  </p>



<p>Anil    00:25:23    And the hard problem of life wasn’t solved. It was dissolved, but life is not the same thing as consciousness. So that’s why I can’t be fully optimistic about it because you can still agree objectively on the data about life. Whereas the data about consciousness are intrinsically private and subjective. It makes it harder, but in my view, it doesn’t make it impossible. And so what will the trajectory of this approach look like? The thing that I actually think is most likely to happen and will be most explanatory in the end to Megan’s. The second part of Megan’s question is when our whole idea of explaining consciousness, uh, should achieve actually changes. Now, we, we, when we set out the problem that we think we’re setting out to solve at the beginning turns into a slightly different problem, and we see consciousness is continuous with the rest of nature. And we worry a bit less about how to explain this apparent division between the mental and the physical, the final point on this is that we sometimes ask too much of a science of consciousness. And this gets at Megan’s second point. When I think she asked what would count as a true explanation of consciousness, right. And there’s a lot, there’s a lot under that. What does she really mean? What does, what, what, what should a true explanation actually mean? And in philosophy of science, this is a complicated question.  </p>



<p>Paul    00:26:45    Yeah. I was going to ask you, um, part of my own questions, whereas, you know, when we get there too enough, I’m using air quotes. Is it going to feel intuitively, uh, satisfying or, you know, because of, uh, theories like integrated information theory that you kind of just have to accept that there’s a complexity and there’s a number and it doesn’t feel quite intuitively satisfying, but I don’t know if that’s what she means by enough, but I was going to ask you anyway, if getting there wherever there is, if that will feel, but, you know, because I’m not even sure we understand life yet. I agree that the, um, the mystery, the mystery of the élan vital has dissolved because we started asking different questions. And I agree with you that we will need to shift our approach and shift the conceptual, um, uh, approach to, to understand what it is. But, um, and yet, I don’t know if it’ll feel intuitively, uh, satisfying,  </p>



<p>Anil    00:27:44    Right? So when it comes to life, that’s a really important point you made because indeed not everything is understood, but the sense of mystery about things being ex explicable has dissolved at least for most people. I think that’s a good indication of, of a mature science of X doesn’t have to explain absolutely every detail, but the sense of deep mystery about details being explicable should have dissolved. And when it comes to consciousness, it’s sort of often put the other way around that people might say things like, well, let’s assume we can explain every single detail wouldn’t there still be a sense of big mystery. Um, this is an odd way to put it because even the premise is not necessarily something that, that we should take as a, as a necessary criteria. The important thing is, does the overall sense of mystery dissolve? And then I think in terms of the standard criteria that we apply to scientific explanations, can we explain a phenomenon now, this it’s tricky, but here I might say, this is back to the realm of different kinds of predictions and Qualia spaces, whether they’re the sorts that David Rosenthal and Megan, and Hakwan Lau talk about, or the Qualia spaces in integrated information theory.  </p>



<p>Anil    00:28:59    It’s another way of thinking about that. Um, explanation prediction. Can you predict when a particular kind of experience will occur and control? Can you intervene in a mechanism to bring about particular kinds of experiences in, in systematic ways? If you can do that, then you’re doing pretty well. Will this be intuitively satisfying? We’d like to think so, but I don’t think there’s any guarantee that it, that it will be. And of course, whether something like integrated information theory is intuitively satisfying, really depends on who you are. It’s firstly, it’s a very complicated, uh, theory. And when you do delve into the math, there are some beautifully intuitively appealing points about it, but there are things that challenge intuition as well. The key point though, is that we tend to require or smuggle in this criterion for intuitive satisfaction when it comes to consciousness in a way that we don’t for other areas of science, like we, we know that quantum mechanics just makes no sense. It makes no sense, whichever interpretation of quantum mechanics, you tend to favor. None of them make any sense at all. Uh, but it’s a beautifully successful science. It’s not a complete science, it’s a beautifully successful science. Does it have to be intuitively satisfying? I think we smuggle that into consciousness and we feel that that has to play out partly because we ourselves are conscious. We’re trying to explain us. And that I think leads us to ask different things from a science of consciousness where the scientific method may not justify us asking those things.  </p>



<p>Paul    00:30:40    Well, I think it means that we need to use quantum mechanics to, um, situate consciousness in microtubules right.  </p>



<p>Anil    00:30:45    Do not go there.  </p>



<p>Paul    00:30:49    Okay. Where we are going is, is the next question. You might recognize this chap,  </p>



<p>Steve Fleming    00:30:56    Steve Fleming here, congratulations on the book. It’s a fantastic achievement. So my question for you is about how we should think about the contents of consciousness within the beast machine framework in biological agents, such as ourselves, there are some things we’re aware of and other processes or neural representations that we’re not aware of. And for me, modeling this kind of distinction pushes you towards a more cognitive or higher order model of how consciousness works, even when we’re thinking about embodied living systems. I suspect you disagree though, and would love to hear your thoughts, enjoy your chat with Paul, and I’ll look forward to listening.  </p>



<p>Paul    00:31:40    Another friend of yours,  </p>



<p>Anil    00:31:42    Another friend of mine. Hello, Steve, thank you for the question and thank you for your book as well and reverse congratulations to you on Know Thyself. It’s a, it’s a brilliant book, um, and very much enjoyed it. It’s another good question. Of course. And the answers I think I, I, I quite agree actually with, with Steve probably more than he was expecting, if you take as one of the core methods and it is one of the core methods in consciousness science to contrast of conscious versus unconscious perceptions, um, then you’re maybe drawn to the cognitive processes that mark that distinction. I mean they can, but they can play out in different ways. They could be on some theories of consciousness, like the favorite higher order type theories of, of Steve and Megan and Hakwan Lau, the differences in the kind of higher order representation that in some sense, it looks down at other processes going on in lower order perceptual circuits, whether they’re to do with the, the world or the body, but these differences could also be in these lower order circuits as well.  </p>



<p>Anil    00:32:53    That explain the difference between conscious and unconscious perception. I think this is a very valid approach. I think that is, as Steve will know, there’s still a surprising degree of controversy about whether unconscious perception really exists at all. As, as a phenomenon, the closer you look, sometimes it seems to go away entirely. And it’s also a methodology that is, it works better for some kinds of experiences than others. It works to the extent that it does work. It works really well for exteroceptive perceptions, vision, audition, things like that. You can have all these rapid swabbed, his toolbox of masking techniques that we can use to at least approximate this conscious versus unconscious content divide. These toolboxes, just that don’t apply  or they’re certainly not as readily available for studying processes about the perceptual regulation and experience of these deep levels of self, like emotion.  </p>



<p>Anil    00:34:01    A lot of discussion goes into questions about, are there such things as unconscious emotions? What would that mean? Is there an unconscious mood? It strikes me as completely plausible that there are some aspects of the brain regulating of the body that do not arise into our conscious experiences. That they’re very basic homeostatic reflexes that don’t seem to shape conscious contents in terms of moods and motions or anything else. And there are others that do. So the question for me is, is more, it’s a very open question. Like what, what, what level of perceptual regulation or perceptual inference are there corresponding aspects of conscious content and at some level there aren’t, and at some level that are, I think this is, this is likely to supply, although, you know, maybe less so to the body, but it’s just a much harder question to get at experimentally. And it doesn’t mean that we just give up, it just means that what are the other methods, the thinking about consciousness, uh, without using these contrasts between con conscious and unconscious perceptions. And this is where I do get drawn to these ideas, more of explaining the phenomenological properties of a conscious experience that is there rather than worrying about when it is or when it isn’t. And what marks that specific difference  </p>



<p>Paul    00:35:31    Do you think that there, um, since the majority, the vast majority of consciousness neuroscience has focused on perception and specifically visual perception that that has biased our intuitions about what might be needed, uh, in the perception predict in, you know, higher order toward higher order thought type of, um, uh, approaches, uh, because, and the other side of that coin is do you think that processes like self self-maintenance and, uh, life processes that you’re focused on have been under appreciated?  </p>



<p>Anil    00:36:07    That’s a bit of a judgment call, isn’t it? I mean, the, the, the focus on vision is, is I think very sensible in many ways. It’s if you, if you think back to the early 1990s, when Francis Crick and Christof Koch were talking for the first time about the pragmatic strategy of looking for neural correlates of consciousness, they were focusing on vision. And I mean, this is a bit of historical reconstruction, but I imagine one good reason for doing so was just that you could do experiments that way you could build on a whole literature of psychophysics to actually do those experiments. And it could just be a more compelling argument to the rest of the community in psychology and neuroscience, that there was a reasonable way to study consciousness. So I think if Crick and Koch had started off by talking about the deepest embodied experiences, it would have been much, much harder to sell because you can’t go and do your experiment the next day.  </p>



<p>Anil    00:37:02    You can’t do binocular rivalry and go, oh, look at this. Uh, so there are good reasons to focus on vision and visual experiences are well characterized, the level of phenomenology. We have all these super interesting aspects of it. What’s, what’s the relationship between central vision and peripheral vision. Uh, there’s a lot known about the visual cortex too. It’s, uh, it’s a relatively well understood part of the brain in terms of its organization. So these are all the good reasons. I don’t know if a study, if th this, this bias towards vision also bias towards higher order theories. I think, I don’t think that’s true, actually, because there, there are people like Ned Block also deeply rooted in the visual tradition who put the opposite perspective and say, look, you know, visual visual experience gives us a compelling case to think that our experience, the nature of our experiences.  </p>



<p>Anil    00:37:56    In fact, more than we have higher order access to at any given time. And this debate rumbled on, I think, between phenomenal consciousness on the one side and access consciousness in very productive ways. Actually, I think it’s a really good debate, but yes, this focus does bias against, uh, recognizing I think these deep roots of all consciousness in the regulation of the body. And as, as we were discussing earlier, it’s not that these ideas weren’t also there from the very beginning. Antonio Damasio, one of the early pioneers of consciousness science too, uh, said this very, very, very, uh, my old mentor, Gerald Edelman also talks about the role of the body in consciousness. So it’s been there all the time. Evan Thompson, Francisco Varela — The Embodied Mind, there are, there are very deep traditions that make this point, but they don’t give you the experimental availability that the focus on vision did.  </p>



<p>Paul    00:38:55    Yeah. I mean, the vast majority of the predictive processing framework is, um, focused on vision as well because everything in neuroscience, the vast majority is focused on vision, but one of your main moves was to point that predictive processing, um, inference process to, uh, bodily processes. So, um, that was D do you, do you think that that was a key connection that you made that allowed you to start thinking about these things?  </p>



<p>Anil    00:39:21    Yeah, actually I do. And I think, I mean, the idea was around at the time. So I think Lisa Barrett in particular had a very similar idea more or less at the same time. It’s not a, it’s not a massive leap at all. They’ve been going a long history of, of thought and emotion that thought about emotions as cognitive appraisals of changes in physiological state. So there was already this sort of interpretive framework applied to thinking about emotions, uh, but it was still in this, this framework of a cognitive part of the brain and a non-cognitive part of the brain and one appraising the other. And the simple idea was just a staircase look in predictive processing, where you have this general principle that perceptions depend on the whole ensemble of top-down predictions about causes of sensory signals at multiple levels, without any clear, bright line between the cognitive and the non-cognitive.  </p>



<p>Anil    00:40:15    Well, that that just provides a, a modern gloss, a predictive processing gloss on these older ideas of how emotions are formed. They now become multilevel multimodal predictions about the causes of sensory signals, but now the sensory signals are the sensory signals that come from within the body interoceptive processes. And just making that connection is a start to then, for me, the really important part of that was thinking about these predictions is having different functions in the two cases. So we already mentioned this, that vision to a first approximation perceptual predictions, trying to figure out what’s their interoceptive predictions, try to control and regulate.  </p>



<p>Paul    00:41:01    Uh, we’re going to continue on with Meghan number three, and then we’ll have one more Megan. Okay.  </p>



<p>Anil    00:41:06    Let’s see if there’s an and relatedly halfway through.  </p>



<p>Megan Peters    00:41:09    Do you think that there is a conceptual distinction that can be drawn between the qualitative character of an experience and the content of that experience? So I’m not talking about level here, but more like the qualitative nature itself. So, uh, in other words is phenomenal character like a substrate or foundation on which the content will rest or is the qualitative character fully inextricable from the content.  </p>



<p>Paul    00:41:42    Does that make sense? And do you want to answer,  </p>



<p>Anil    00:41:46    I’ll try to answer though. I think this is more of an extended discussion than a simple question, because it turns on what precisely is meant by phenomenal character and, and content. So I think I’ll make up an example. It might not be the kind of thing that Megan has in mind, but vision has a phenomenal character. It has a spatial organization, objects have the property of objects that they seem to have locations in space in three-dimensional, um, volumetric extensions. And then within that, against that background, we have specific experiences. I see a cup, I see a laptop computer. I see, uh, another house across the street. So are these the same thing? No, they’re not the same thing. Um, but they are to use Megan’s words. I would think inextricably intertwined, the reason I can experience an object as having the property of objectness, as having volumetric extension as having a back, even though I can’t directly see the back, that to me does rest on the phenomenal character of visual experiences in general, that they have this spatiality and volumetric extension in a way that let’s say emotional experiences do not.  </p>



<p>Anil    00:43:04    So this is really off the top of my head. Now I have to say, but thinking about it, it seems that the two aspects of conscious experiences co determined each other. Uh, and I wonder whether there’s any, if you subtract out all the, all the possible contents, do you have anything left? Is there some sort of rule, phenomenal character to a modality? I think actually probably, probably not probably there’s that there’s some, even the experience of nothing is a sort of specific content against that kind of phenomenal background. But, but here I am, I am speculating. And, um, this is something, yeah. Megan, we should definitely talk about more. It’s very interesting question.  </p>



<p>Paul    00:43:44    Yeah. All right. More in public. Okay. Do you want to do the drum roll or should I, here comes here comes  </p>



<p>Anil    00:43:50    The last question before from Megan Peters  </p>



<p>Megan Peters    00:43:53    Regarding the measure of Lempel-Ziv complexity, would you say it is a measure of conscious level only, or is it a measure of the complexity or richness of a conscious experience that can be held in awareness if that awareness exists to begin with?  </p>



<p>Anil    00:44:13    That’s another very good question on the Megan Peters show. So we have this measure of Lempel-Ziv complexity. This is just to summarize it for, for people. This is, it turns out to be quite a robust measure of brain dynamics that can be used to distinguish between different global conscious states like sleep, anesthesia, the vegetative states, and a paper that we did with in collaboration with Imperial college, the psychedelic state too. And what Lempel-Ziv complexity measures very broadly is the diversity of different patterns in a signal. So if you apply it to, let’s say EEG in the brain, the brain’s electrical activity, the higher the Lempel-Ziv complexity, the more diverse the activity patterns are. And the way it works is it measures how compressible the data are. So it’s the same algorithm roughly that’s used to compress digital photos into JPEG files.  </p>



<p>Anil    00:45:12    So if you just have a photo of a featureless blue sky, it’s very easy to compress it because all the pixels are more or less the same. But if you have actually just white noise, visual, snow, you can’t compress it at all because you have to specify every pixel in every place. So the Lempel-Ziv complexity of visual snow is really high, of a featureless blue sky is very low. And the finding is that as you index through these global states of consciousness, the level of, or the Lempel-Ziv complexity goes down when consciousness fades away. Uh, so hold on. If I got that the right way round, yes, it goes down. The brain is less diverse. It becomes more predictable as you, as you lose consciousness, as consciousness is lost, the perhaps surprising thing. And it was surprising to us. This was an exploratory study, was that in the psychedelic state, it goes the other way around.  </p>



<p>Anil    00:46:07    So, uh, the brain becomes more diverse, less predictable in the psychedelic state than in the baseline of, of normal waking. Well, certainly when we did this study, this was the first time we’d seen, uh, this metric go above the level of the baseline waking state usually goes below when you lose consciousness in one way or the other. Now having explained that I’ll try and remember what Megan’s good question was, which was something it was like, is it really a metric of the level of consciousness in the sense of this distinction between wakeful awareness and anesthesia and drowsiness and all these things? Or is it indexing the richness of experience, uh, that’s possible in each of these states? I can’t, I’m not sure if I’m recollecting the question quite accurately.  </p>



<p>Paul    00:46:57    Would you want, do you want me to play it again,  </p>



<p>Megan Peters    00:47:02    Or is it a measure of the complexity or richness of a conscious experience that can be held in awareness? If that awareness exists to begin with? The  </p>



<p>Paul    00:47:13    Last part gets me to,  </p>



<p>Anil    00:47:16    So getting back to, so with that background about Lempel-Ziv complexity in mind, let’s, let’s return to Megan’s question, which was, is it a measure of the level of consciousness that just what it is this measure of the difference between sleep and wakefulness, or is it a measure of the richness of experiences that can be held in mind when consciousness is already there? I don’t think it’s either of those things really. I mean, th th Lempel-Ziv complexity just is what it is. It’s a measure of the diversity of the neural dynamics now quite what that relates to it’s, it’s a bit of both, it’s, it’s an interesting and open question. Um, it’s clearly not simply a measure of conscious level for the simple point that there probably isn’t just one scale of conscious level. There’s a nice paper by, by Adrian Owen, I think, just looking at multidimensional conceptions of conscious level, it’s not, it’s probably not as simple as it just being one point on a, on a linear scale.  </p>



<p>Anil    00:48:26    Um, there is certainly something about signal diversity, Lempel-Ziv complexity, that tracks conscious level across many of these dimensions of conscious level. Uh, but I wouldn’t say it’s identical to that thing also because when you lose consciousness, you still, you know, your brain still has some degree of, of dynamic activity. There’s still some values of Lempel-Ziv complexity going on. I think the more interesting question is indeed does it index something about the richness of experiences that are possible in different states and here the psychedelic application is informative because I think it’s misleading to say that the psychedelic state is a higher state of consciousness. Although, you know, newspapers were tempted to report the finding that way. If you took Lempel-Ziv complexity as just a measure of conscious level, that’s the conclusion you would draw, it goes up. So the psychedelic state is a highest state of consciousness and sleep is a lower one, but I think it’s spot on speed or something.  </p>



<p>Anil    00:49:27    Well, I think very good questions. And I think, um, I think Daniel Bor is beginning to do some work on, on looking at Lempel-Ziv complexity across a range of different conditions now. Uh, but it seems to me a more honest description of the psychedelic state. Not that it’s a highest state on some single scale, but that it’s an experientially, more diverse state, less constrained state. There are other theories that suggest something like this, the, uh, the REBUS theory of Robin Carhart-Harris and Karl Friston — relaxed beliefs, um, sort of relaxed priors in a predictive processing framework that, so I think there’s at least a, a, a fairly informal way where you can say that the increased diversity or lower predictability of brain dynamics in the psychedelic state goes along with the somewhat freewheeling nature of perceptual experience. And you go to mixture of experiences of self and the world that often characterize the psychedelic state.  </p>



<p>Anil    00:50:33    That’s probably a better way to think of it, but the Lempel-Ziv complexity measure itself, I think, has to be recognized that it’s a pretty brute force measure. I mean, it’s not that sophisticated, and this is a real challenge in, in developing measures like this. It tends to be that deliberately oversimplified metrics like Lempel-Ziv complexity have empirical traction, but as you make a measure, more sort of theoretically sophisticated, like one of the earliest things that I remember doing was this a measure of causal density, which is supposed to be a more principled measure of brain complexity than Lempel-Ziv complexity. So a little bit in common with measures of integrated information in the sense that it’s, it’s low, both for completely random and for completely ordered processes. Uh, but these more sophisticated measures just tend to not perform very reliably on empirical data. You know, you get numbers, but they’re just sort of all over the place often. And they’re very sensitive to the small differences in the data. And so they’re not yet that useful. I think that’s the real challenge is developing measures that have the empirical traction of simple metrics like Lempel-Ziv, but that, uh, rest on more interesting, deeper theoretical principles that that will allow us to answer Megan’s question in a much more satisfactory way.  </p>



<p>Paul    00:52:01    And this goes back to the, you know, the brain is a complex system and, uh, you know, whether it’s going to feel intuitively satisfying when we have a good enough explanation of consciousness, because, um, I mean, you know, modern science is still grappling with complexity, even though you’re talking about measures of complexity, obviously, but, uh, this is within systems where we still don’t know what’s actually important. We don’t know all of the pieces potentially that are important to even test for empirically, right? So, uh, I mean, you’re measuring these processes, but we might not have the whole story of what is important in, in that complex, uh, realm. Now I’m just kind of drooling out and  </p>



<p>Anil    00:52:44    You’re absolutely right. And I mean, there’s something completely, right? So that’s, that’s one thing, what’s the, what’s the appropriate granularity. So we have one problem, which is, we don’t yet have the neuro imaging methods to give us both high time and space resolution and global coverage. Even if we did, we don’t really know what’s the appropriate granularity to, to look at brain dynamics or which levels of granularity are useful to look at brain dynamics, we assume sort of, is it neurons or collections of neurons, whatever. Um, it’s, you know, it’s not just like voxels, that’s pretty arbitrary, right? That’s just what an fMRI scanner can can resolve. Um, and also the there’s just, I think, a need for exploring different kinds of measures that can characterize the behavior of complex systems in general. And this goes back to that, I think a theme that we’ve had throughout this conversation, which is, is mapping between mechanism and experience going to be enough, is it going to be intuitively satisfying?  </p>



<p>Anil    00:53:48    And one thing that speaks to that is, well, how do we construct these mappings? If we just develop correlations, like say there’s a correlation between you being conscious of a house and these brain areas being active, this is never going to be that either intuitively satisfying or that explanatorily powerful, right? It’s a correlation, but we, although this identifying of neural correlates is important, it’s just the starting point. And there are much more sophisticated ways to build bridges between mechanism and experience, quality spaces, different kinds of prediction, uh, complexity measures. And one of the things that, uh, with colleagues, Sussex and London and Cambridge now that, that I’m particularly interested in is this whole bugbear of emergence. How do we characterize emergents in complex systems and what might, uh, and as a well-grounded, but an applicable measure of emergence, tell us about consciousness. People often want to stay well clear this cause there’s a lot of rubbish spoken when people bring the concept of emergence together with consciousness.  </p>



<p>Anil    00:55:00    Generally again, thinking that the two things are quite spooky, so they probably have to be related. Uh, but there are sensible ways. Think about emergence too. There are sensible ways in which the brain acts as a whole, yet it contains many distributed parts and there are sensible ways that a flock of birds looks to be like a flock of birds, even though it’s not identical to the behavior of any individual bird coming up with a sort of right set of mathematical tools to identify and characterize emergence. It’s not going to solve the problem with consciousness, whether it’s the hard problem or the real problem, but it will give us a different perspective, a different way of relating mechanism to experience. And I don’t know what that will provide ultimately, but it might again get to Megan’s question about what are these measures telling us about phenomenology,  </p>



<p>Paul    00:55:54    Uh, nearly eight. You, uh, actually, we’ve talked about psychedelics a little bit. You actually write in the book about, of course your research on psychedelics, but then your own personal experience. I don’t know if you’re revisiting or if this was a first time experience for you. I remember trying to explain to my father driving through the Backwoods of Texas, what an, uh, an acid trip felt like. Uh, of course I was 17 or so. Um, and you know, I haven’t done psychedelics in a long time. Do I need to, uh, revisit this as an adult, as a, as an old man? Like I am,  </p>



<p>Anil    00:56:26    Well, as Thomas Metzinger once said on another podcast, I think with Sam Harris, that there, there are serious consciousness scientists, and there are non-serious consciousness, scientists, some, something like that. And so you make of that what you will, my description in the book was, was from a relatively recent one. I grew up in south Oxfordshire in the UK. And I dunno, I mean, my, my, with hindsight, fairly sheltered upbringing that didn’t at least to provide any opportunities that I found for, uh, experimenting with psychedelics at the time. So yeah, when I, uh, decided to try them, it was from the perspective of someone who was already very interested in consciousness, its brain basis. And what psychedelics might, might tell us about that?  </p>



<p>Paul    00:57:17    Yeah. Okay. I’ll leave it to the reader to read your descriptions in the book, but it sounded like a, um, a good experience. I had, uh, a couple, a couple unexplained, uh, unpleasant experiences, um, very vivid, vividly unpleasant, but, um, that might speak to my age then and insecurities and who knows?  </p>



<p>Anil    00:57:35    Well, I think this is, I do want to be careful about this because I think there’s quite a lot of boosterism around psychedelics at the moment as well, but it’s for it’s, uh, you know, th there, there is a huge amount of clinical potential, and I’m, I’m fully behind efforts to conduct clinical research into that, into the merits of the potential for, for clinical treatments of, especially in the domain of psychedelic assisted psychotherapy. Uh, but at the same time, they’re not a panacea. They’re not a magic bullet. People do and can have, uh, adverse experiences. And that’s not simply a matter of age that’s sometimes they just happen. It’s a matter of sentence setting as well. So I’m, I’m slightly concerned that the pendulum is swinging too far in the other direction, but there’s no doubt in my mind that they are not only potentially clinically very, very useful, but as a tool for consciousness science, they have extraordinary value because you can go in, you can make a very simple pharmacological manipulation that we know, uh, pretty well at the low level.  </p>



<p>Anil    00:58:39    What happens, we know which receptors are affected. We know where these receptors are roughly in the brain, the serotonin, uh, 5-HT2A receptors. And then we get these very reliable and very dramatic changes in the nature of conscious experience. And that does two things. It, firstly tells us that the space of possible experiences that we as humans can have is probably larger than we would realize without that. And it also just opens up this opportunity to say, okay, you change, you change one very low level thing about the brain experience changes in these dramatic ways. So what’s going on in the middle, how are the global patterns, brain dynamics changing that, explain why psychedelics have the effect that they have. And I think that’s a really important intervention in a system is always a very valuable thing you can do when you try and explain its workings.  </p>



<p>Paul    00:59:35    So as I was going to start writing notes, Dr. Anil Seth suggests I get a new drug dealer, but no, that’s not what you’re. That is  </p>



<p>Anil    00:59:43    Definitely not what I am saying. Okay.  </p>



<p>Paul    00:59:46    So, um, before we move on, because I want to talk about freewill and hopefully we’ll have time for a few other topics, but, uh, another thing that you read about in the book in a S somewhat related, I think actually it’s physically near, um, the psychedelics are out of body experiences. Um, what you don’t write about. Uh, but I’m going to ask about, because at some point, YouTube, uh, decided that I really liked hearing people’s vivid and, um, to them extremely convincing near death experience accounts where there’s this commonality, right, where they’re, you know, in the tunnel, they meet the, usually the guy who brings them, you know, th th they, they feel a strong, strong, spiritual presence with them that will guide them through. And they feel like they’re there for a lifetimes. Uh, then they have to decide whether they’re going to stay, you know, there’s this very common narrative, uh, that seems to go along with near death experiences. And I’m just curious if you have thoughts, whether you know, any of those narratives or not, but, uh, of what might be going on there.  </p>



<p>Anil    01:00:50    You’re right. I didn’t, I didn’t write about it. It’s a bit of a can of worms, but I just, actually, it brings up a common thread with a psychedelics, which is the danger of taking things as they seem as a reliable guide to how things are. So you, when I experienced psychedelics for me, this was so dramatic validation of a broadly materialist picture of the brain. Now you change the brain and your experience changes what could be more consonant with a materialist picture than that. Uh, but it turns out in a study by, by Chris Timmerman at, at, at, in London at Imperial, that when he did a large survey of people about how psychedelic experiences had affected their metaphysical beliefs about consciousness, for most people, it reinforced a more immaterial or dualistic perspective that, oh, consciousness is must, must, can’t be just what’s going on in my brain because it changed so much.  </p>



<p>Anil    01:01:49    And it was a sort of filter to a wider open the filter to a wider universe. And I found this very interesting, but I must admit slightly deflating. I thought, okay. Th th th I thought it would go the other way, but that was me just generalizing my own take on it. Um, and a similar thing plays out with, out of body experiences and near death experiences as well. So if you take a sort of realist view of the experiences you’re having, then you reach quite metaphysically, radical conclusions. Like I experienced that my first person perspective is now somewhere other than in my head between my eyes is somewhere on the ceiling or it’s nowhere at all. Um, if you take how things seem as how they are conclusion, well, the soul or my conscious the essence of my consciousness can in fact, leave the body similar story with near death experiences, right?  </p>



<p>Anil    01:02:48    Do you seem to be the point of, of ceasing to exist yet? All these, all these things are happening. Natural conclusion is that there’s something that persists beyond death of the body. There’s some entering into a different realm, something like that, but these explanations, we should take people’s descriptions of their experiences very seriously. And it’s very interesting that there are all these commonalities in things like near death experiences like this, this sort of tunneling of the vision, but that doesn’t mean that these experiences are direct reflections of what’s actually happening in the universe. So there are very good reasons that for instance, to think of the tunneling of vision as well, that’s different parts of the visual cortex, just shutting down according to pretty reliable patterns of blood flow. Now you can sort of see, well, as, as blood flow declines, you’re going to get peripheral vision falling away.  </p>



<p>Anil    01:03:43    Um, now I don’t know if that’s demonstrably true, but it’s certainly for me a more plausible explanation than you are literally entering a tunnel of light to, to another realm. And the same goes for out of body experiences. For me, instead of demonstrating the reality that the soul can leave the body. It tells us a much more interesting story, which is that the first person perspective is not to be taken for granted. It’s a construction it’s part of the act of perception, and this fits along with plenty of other evidence that okay, you can stimulate parts of the brain as Wilder Penfield did back in the 1940s or fifties, or whenever it was. And transiently induce an out-of-body experience, uh, tied to the brain. Now that would be strange to surmise that stimulating, um, part of the brain causes the soul to temporarily leave the body, but it makes much more sense if you think of what you’ve disrupted, the circuitry that is deciding that is inferring, where in space, the first person perspective is.  </p>



<p>Anil    01:04:45    So I love all these, all these examples, but I think we just have to be careful about, uh, on the one hand respecting people’s descriptions and also respecting what it means for them. You know, it’s no good. Somebody had a near death experience is going to be one of the more meaningful experiences of their life. And it’s just not right to go and say, oh, by the way, no, that’s just your visual cortex shutting down means nothing. Now that’s not, that’s not helping anybody, but neither should we take them at face value. There’s no reason to take that explanations for what’s going on as really what is actually going on.  </p>



<p>Paul    01:05:24    I mean, one of the compelling things about, um, a high number of these stories is that they have felt that it is didn’t, wasn’t, didn’t only feel real. It was the most real thing they’ve ever felt and that after they have lost all fear of death, because they have had this experience and it has really, you know, changed their life, I would say in a good way, I’d love to not fear death. Uh, it’s not really the afterlife. I fear it’s the suffocation while I’m drowning, you know, the, the panic of my last moments, right. That’s what I actually fear, but, um, but it seemed to have had, you know, a benefit for a lot of people. So I just, I find it interesting. Okay, Anil. So, uh, let’s talk about another interesting topic that has been a thorny issue in the history of philosophy and continues to be a thorny issue, uh, freewill to which you devote a chapter in the book. And instead of asking you a particular question about freewill, I’m going to just let you summarize, uh, your position and account of free will, if you will.  </p>



<p>Anil    01:06:30    I will. And the first thing I want to say about it goes right back to the beginning of our conversation, because free will is not something I’ve written about in any scientific papers of mine. So, but I thought I couldn’t, I thought about it and I couldn’t have a book about the neuroscience of consciousness and self write about discussing free. Well, so it was writing this chapter was probably the most challenging chapter to write, but also again, the most rewarding cause I was really figuring out what I thought about it and how justifiable those, those thoughts were. So what are these thoughts? Well, discussions about will get derailed in so many ways. And whenever I give public talks about consciousness, when free will comes up in the Q and a it’s often the sign that, okay, we’re done, nothing else is going to come up now where we’re on the FreeWheel roller coaster and we’re not getting off.  </p>



<p>Anil    01:07:23    Uh, and it could be a number of reasons for this. I think of all the aspects of selfhood that we cling to, and that we have some just deep sets resistance to that being explained in terms of science free, where there’s probably, uh, the most clingy. Like it’s okay. If I tell someone your experience of the world is visual experience is a construction like, oh, okay. But if you, if you make the claim that no, you’ve, there is a real sense in which you don’t have the freewill, you might think you have, that can be very personally disruptive, but that’s not a good reason for there not being a good scientific mechanistic explanation of free will. And so what is that? Well, I think very much in tune with the rest of the book, the right way to understand free will is as a kind of perception, right?  </p>



<p>Anil    01:08:22    Free, well experiences of freewill arise in the field of consciousness, just as other experiences arise. Um, and the common lens that I applied to all of these experiences is a kind of perceptual inference. So what does that mean when it comes to free will? Well, firstly it means that it’s not consistent with this idea of, of a libertarian or spooky freewill, you know, this idea that there is in fact some way in which an experience of freewill in virtue of the kind of experience that it is swoops in and makes things happen that otherwise wouldn’t happen to me, that idea just makes no sense. Anyway, that disrupts the co causal closure of the universe. It requires conscious experiences to have this spooky, uh, causal power and some kind of very savvy causal power as well. That makes sure you do the thing that intervenes in the brain and exactly the right way to make stuff happen.  </p>



<p>Anil    01:09:21    Um, it’s, it’s just not the kind of will that we should be wanting to, to preserve. It’s got a lot of echoes of dualism of an immaterial mind pulling strings in a material brain and body. And once you get rid of wanting to preserve that, then one source of debate in the, in the whole area just falls away, which is this debate between determinism and in determinism in the universe. Like, does it matter if the world is completely deterministic or if there’s a bit of chance here and that, no, it doesn’t matter at all. It’s a complete red herring. Why should it matter? Because the only reason you might want a little bit of indeterminacy is so that’s where this spooky free will can come in and, and change the course of events, get a bit of elbow room for, for something spooky to him to make its play.  </p>



<p>Anil    01:10:12    I don’t think we need that, right, but you don’t want to throw the baby out with the bath water. Uh, we have experiences of free will and they mean something. We as organisms also make voluntary behaviors. There are some things that we do the causes of which are relativity immediately found in our environment and the world around us. Typical example, you put your hand on a hot stove and you recoil, there’s no experience of freewill that goes along with that reflexive, uh, um, recoil of the heart of the arm. That’s involuntary though. You said, did you say, sorry, that’s involuntary. Now that would be an example of an involuntary reflex, right? Contrast that with a voluntary action, like picking up this cup of tea and having a sip from it. Um, I decided to do that and it felt like I decided to do that.  </p>



<p>Anil    01:11:09    There was a feeling of intention for me picking up that cup and a feeling of agency, um, that accompany the cup arriving at my mouth and me taking a sip from it. These are the sorts of things that characterize the feet. The experience of free will. In fact, I think there are three things. There’s the experience that an action comes from within there’s the experience that the action is aligned with my beliefs and desires know I want, I wanted a sip of tea just then. And there’s a feeling that I could have done. Otherwise, this is the really tricky one. There’s, there’s the feeling that I might not have picked that cup up, or I might have picked something else that seems to me that those are the three characteristics of, of experiences of freewill. And I prefer to think of those characteristics in much the same way that I think of the perceptual experience of something like color.  </p>



<p>Anil    01:12:00    So we know color seems to exist in the real world. If I look out of the window, now I can see a gray sky. It’s not really a color, but it is bright. And so, um, and it really seems to be that color, right? That the color seems to exist as an objective mind, independent property of the world, but we know that’s not true. We know, and don’t need neuroscience, Newton, Suzanne, all tellers that colors are constructed by the brain. Um, I think the same thing goes for our experiences of freewill, right? An experience of freewill has this metaphysically subversive content that’s it has causal power over events. So just as red things really seem to be read the experience of, of a 3d world action is that that experience somehow had causal agency in that action. Now, redness doesn’t really exist in the world, but it’s a very useful thing for the brain to construct in the same exact way.  </p>



<p>Anil    01:13:05    These experiences of freewill don’t really have the causal power that they seem to have, but they also very useful for the brain in very specific ways. And this chapter tries to tell the story of why that, why that, so, and this is building on work by people like Patrick Haggard and Michael Shadlen and, and others, um, that I think a good way to think about why we experienced voluntary actions as freely willed is so the brain can learn about what happened after then, uh, and learn about their consequences. So the brain, the organism might do things differently. The next time you can’t replay the same tape and get a different outcome. But the organism, I might sit down at the desk this time tomorrow and do another podcast. The universe will have changed. My brain would have changed from talking to you today. So if the experience of drinking tea went badly today, then I might have a glass of water tomorrow.  </p>



<p>Anil    01:13:59    That’s a useful thing for the organism to have picked up on. And the way the organism can track those regularities in the world is by sort of labeling these voluntary actions with a particular kind of character. And that character is this character of counterfactual, reality of internal origin out of alignment with, with beliefs and desires. So there is free will in the sense that we have voluntary actions and there are lawfully and meaningfully associated with particular kinds of experiences, but we don’t have the spooky kind of freewill that just leaps into the brain from another dimension.  </p>



<p>Paul    01:14:38    Do you worry though, or have you had any feedback from people who might reply that you, so even in the book you’ll use freewill in quotations because you know, like, like we were talking about earlier in the science of consciousness, we have to reframe how we think about these things conceptually, to actually get a grasp on them, but one could respond to that. What I actually do care about is this, uh, what you’re calling the spooky stuff. I, um, and, and by describing the phenomenology of it and our perception of the freewill, which might be satisfying causally, uh, it doesn’t satisfy my need and desire to feel at the helm of my own voluntary actions. When what your, um, what, you know, what you’re describing is my perception of a voluntary action, but I wouldn’t call that a voluntary action. I would say I wasn’t in control causally. Right.  </p>



<p>Anil    01:15:35    Right. And I think this is the key point, right? Here’s a situation where I do think it’s a very good example of how the mystery that we started with productivity changes for me, this is a satisfying account of freewill. Uh, it does everything it should do. And it makes sense to me as well, your example of somebody who might respond well, that doesn’t feel right to me, that it doesn’t feel like I’m in that kind of field. Doesn’t explain the feeling that I am in control. Well, there’s another, um, another issue that slipped in there, which is this idea that there is an eye that is in control. And of course, part of our conversation has been that the self isn’t, this unique thing that, that sits behind the windows of the eyes pairing out, and that decides what to do. And then contracts various muscles.  </p>



<p>Anil    01:16:23    The self is a kind of perception, whether it’s this perception of the body as a living organism, whether it’s emotion, mood, first person perspective, all of these things are aspects of perception too. So the experience of free will is not something that, uh, a self has on users in some way. It’s just part of what the experience of being a self is. And again, there are clinical examples which show us that this aspect of self, it can go away too. There’s this condition of a kinetic mutism where people specifically seem to lose the experience and ability to engage in voluntary behavior. But other aspects of their self might, might remain intact. For me, it’s a very satisfying way to think of it. And in fact, it also doesn’t, and here’s, here’s another really important point. It’s not, this just leads me into an apathetic life that I think, okay.  </p>



<p>Anil    01:17:20    Right. If free will, is in fact, a perception of voluntary action, that’s mainly useful for the future, then yeah, screw it. I don’t actually have the ability to behave in the world as I, as I, as I thought I did. No, of course, I still see red when I look out the window and see surfaces with particular kinds of reflectance is I will still experience free, will in the same way. And that is intrinsically coupled to my voluntary behavior for the reasons we’ve just been talking about. So it changes everything, but it also leaves the essential things completely unchanged that I still go about my business in the same way as before  </p>



<p>Paul    01:17:59    Looping back real quick to psychedelics does the, because it’s like a deluxe are often associated with the disillusion of the ego. Uh, do you think that there is a connection and I, I have no idea about this, that connection, um, between someone who has experienced a dissolution of the ego through something like psychedelics and the acceptance of this account of free will as, um, as satisfying  </p>



<p>Anil    01:18:25    You’d like to think so. I mean, again, for me, it’s incredibly, it’s incredibly compatible. Like the, this sort of ego dissolution that goes along with, with psychedelics is completely in line with thinking about freewill as this kind of perception of, of voluntary action oriented to the future. Um, but I rather worry that, uh, just as Chris Timmermann and the study found that people who’ve taken psychedelics generally move away from a materialistic belief, they may also move away from the way I think about freewill as well. It depends on your starting point. It depends on where you’re coming from, what the psychedelic experience is going to do to your beliefs about these things.  </p>



<p>Paul    01:19:07    All right. Uh, I’m aware of our time here. And, uh, one of the things that you talk about in your book that I, I guess, because I’ve been thinking a lot about the relation between life and intelligence, and of course your book is about consciousness and life, and you write about how intelligence and consciousness, uh, are not necessarily orthogonal, but they aren’t along the same axes. So I wondered, um, I, I kind of want to throw all these three in the bag, uh, because I’ve come to appreciate intelligence. And I think the success of deep learning and computational neuroscience approaches, uh, to mechanistic, computational accounts of how minds and brains work has made me, um, ironically appreciate other life processes, which is interesting because you, this is, you know, the, the focus of your, uh, introspective inference, where there are PR there’s predictive processing exactly on these life processes and, uh, needing to control, uh, the processes to stay alive. So I’m sorry. That was a huge mouthful, but I’m wondering if you can explain your view on the relationship of life consciousness and intelligence.  </p>



<p>Anil    01:20:25    Definitely. Just to clarify briefly, I talk about interoceptive inferences rather than introspective inferences. They’re sort of, it’s often confused, but just to make the distinction, interoception being about perceptions of the body, introspection, thinking about your own thoughts very broadly. So life consciousness and intelligence, it is a, it is a big bag. It’s a mixed bag. And the theme of the book really has led me to recognize these deep connections between consciousness, especially conscious self and life and, and the, the claim, the primary claim being that we perceive ourselves and the world around us with through, and because of our living bodies, that all the predictive machinery that underpins all our experiences operates in, in light of this primary biological imperative to stay alive. Another reason the life thing is critical here is because unlike a computer where you have relatively sharp distinction between hardware and software, and if you use a computational metaphor for the, for the mind, you tend to think of the brain is the hardware and the mind is the software.  </p>



<p>Anil    01:21:37    And maybe if you write software in the right way, does the right kind of information processing, which is a slippery term, uh, then consciousness will, would arise. Um, if you think about living systems there, isn’t such a sharp distinction between hardware and software or mindware and wetware, uh, it’s kind of just the hierarchical dependencies all the way down. I mean, this goes right back into literature that inspired me many years ago by, um, by Maturana about autopoiesis and the, the sort of the way in which cells, uh, construct the components, their own components over time, they, they self generating processes. Um, and so this, just this recognition that there is no clear line between mindware and wetware makes me very suspicious of the idea that consciousness is substrate independent, that it could be easily run on a different thing, because where does the substrate start and stop in a biological system?  </p>



<p>Anil    01:22:44    And suspicious of the idea that consciousness is simply a matter of information processing, because that tends to go along with substrate independence, um, as well, but what goes with it. But if you do think about consciousness in terms of information processing or something that might potentially be run on a, on a, with the right kind of software that tends to get groups with intelligence and this, this sort of often unstated assumption that a sufficiently intelligent computer will become conscious that consciousness is a function, maybe a complicated function, but a function of intelligence in a substrate independent way. And I just think this is, this is based on also, it might be right. Like, I cannot say that it’s not right, but I just think it’s based on a lot of questionable assumptions. There is for me, no good reason to think that consciousness is substrate independent and at least one interesting reason to think that it might not be this lack of a clear, uh, label for where the division between mind, where and wetware, and then why would we even think that consciousness is related so intimately to intelligence?  </p>



<p>Anil    01:23:58    And here I worry that, that we have a spec serve another kind of human exceptionalism that we think we’re intelligent, which is a bit questionable these days, but we’re certainly smart in some objective ways compared to other species. What we do with that smartness is the questionable part. Um, we think we’re smart. We know we’re conscious, so the two must go together. And this is a really dubious assumption because it leads us, uh, maybe to overestimate the possibility of building machines that are conscious by the way. I don’t think we should even be trying to do that. Um, and it may lead us to underestimate,  </p>



<p>Paul    01:24:39    I don’t think we’re in any danger of, uh, creating conscious machines through building  </p>



<p>Anil    01:24:44    AI. No, I don’t think we are time soon. That’s right. But, but I, I subscribed to the thing, the view here, that, for something that could be so ethically cataclysmic, um, even the tiny possibility that we might succeed is worth a little bit of worry. Part of our worry budget should be devoted that way. Not a massive amount, but part of it, but I think just more, more broadly, it seems to be often treated as this just, oh, that’d be cool. Let’s just do it. You know, even if we can’t do it. So the skepticism is often about like, oh, but you can’t know you’re not going to succeed, but I think there’s a deeper reason to question the motivations and the goals, even if they’re not achievable, because we don’t know what it would take to build an actually conscious machine, but we also don’t know what it would not take.  </p>



<p>Anil    01:25:33    We might do it by accident without realizing here, I’m actually worried about things like brain organoids, um, brain organoids are these brain like structures of increasing complexity that are grown at scale in labs for good reasons, for good medical reasons. Um, but these are made out of neurons. So the whole question of substrate dependence or independence goes away. And so the possibility of, uh, organoid consciousness I think is much more concerning than consciousness, suddenly shimmering into existence in my next generation laptop. Uh, so we’ve, we’ve kind of rummaged around in this bag quite a bit here, but I do think it’s worth separating the tight bonds in my mind, at least between consciousness and life and the looser bonds between consciousness and intelligence. Now you probably have to have a minimal degree of intelligence to have a conscious experience. And, um, quite what that minimal degree is.  </p>



<p>Anil    01:26:32    It’s hard to say. I mean, intelligence is a woolly concept, roughly doing the right thing at the right time. Uh, and being intelligent certainly gives an organism, a richer space of possible conscious experiences. A simple, relatively dumb organism might be able to experience the difference between sadness and happiness or suffering and joy, but, but we humans can, can experience sadness in all sorts of ways in regret and anticipated regret and all these, all these things that depend on thinking about possible futures and possible counterfactual pasts. So the specific nature of conscious experience is very much tied to the kinds of thoughts and intelligent competences that we as humans have. But at root, I think it’s much more closely tied up with life,  </p>



<p>Paul    01:27:25    Given our conversation today. So, you know, I didn’t know we’re going to talk about cerebral organoids at all, but as you were speaking, I was thinking, let’s go speculative to, to at the very last moment here, I was thinking that the kind of consciousness, the quality, the phenomenal experience of a cerebral organoid would be much more alien, uh, to my brain than let’s say a chimp or some someone, you know, I would imagine that a chimp would have a much more similar, phenomenal experience to me than a cerebral organoid made from my own neurons, right from let’s say we, we, uh, use a sample of my neurons and grow an organoid. It wouldn’t be like me at all.  </p>



<p>Anil    01:28:10    Yeah, that’s right. And firstly, I, I, I also think that the prospect of, of building a conscious organoid is, is very remote, but I think it’s much less remote than the prospect of building a conscious laptop. Um, one of the big questions about potentially conscious organoids, and again, I don’t think we should be setting out to build these things, um, is whether a history of, of interaction with an environment matters. Uh, so we organisms brains, non organoid brains, invariably have an evolutionary history that involved bodies and a developmental history that involves bodies, senses interaction with the environment as well. It’s pretty clear that we don’t need that interaction in the moment to have conscious experiences, can we can be dreaming and be basically cut off at least from the external environment, but we may need a history of that interaction in order to provide any determinant, uh, conscious content.  </p>



<p>Anil    01:29:12    Uh, and so we, I think we wrote something along these lines in a paper, I had a paper with Tim Bayne and Marcello Massimini in Trends in Neurosciences in 2020 called Islands of Awareness. And it was discussing candidate situations where there might be consciousness completely cut off from a body in an environment. And an organoid was, was one of these cases. And I think we wrote there that we might be able in some, some future to tell, uh, whether an organoid is conscious, but have no idea what it is conscious of at all. And I think that that could be one way. It goes on the other hand, people building organoids, designing organoids these days are equipping them with sensors with, with actuators too. So you have now the possibility of, of organoids that can interact with the environment, but do they do so, do they have a body that they maintain in the state of being alive? And, and that’s, that’s a whole other question then you’re not really talking about an organoid you’re talking about a synthetic creature with a brain, with a synthetic brain, very different thing.  </p>



<p>Paul    01:30:21    Uh, Neil, we w we went through a pretty good chunk of the book, but, you know, there was a ton more that we didn’t get to cover. And I hope that people read the book. Um, if not just for the pleasure of reading it, because it is easy on the eyes and mind as you read it. And of course, uh, has tons of good ideas and descriptions in there. So this has been a joy for me. Thank you for being here and good luck with the book.  </p>



<p>Anil    01:30:43    Ah, thank you, Paul. It’s been a terrific ramble through various landscapes of the book. Very much enjoyed the conversation. And thank you, Megan, for lots of questions to   </p>

</div></div>


<p>0:00 – Intro<br />6:32 – Megan Peters Q: Communicating Consciousness<br />15:58 – Human vs. animal consciousness<br />19:12 – BEING YOU A New Science of Consciousness<br />20:55 – Megan Peters Q: Will the hard problem go away?<br />30:55 – Steve Fleming Q: Contents of consciousness<br />41:01 – Megan Peters Q: Phenomenal character vs. content<br />43:46 – Megan Peters Q: Lempels of complexity<br />52:00 – Complex systems and emergence<br />55:53 – Psychedelics<br />1:06:04 – Free will<br />1:19:10 – Consciousness vs. life vs. intelligence</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ed81adf7-e879-4af2-8b43-5d14291a3f06-117-Anil-Seth-public.mp3" length="88766181"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Anil and I discuss a range of topics from his book, BEING YOU A New Science of Consciousness. Anil lays out his framework for explaining consciousness, which is embedded in what he calls the “real problem” of consciousness. You know the “hard problem”, which was David Chalmers term for our eternal difficulties to explain why we have subjective awareness at all instead of being unfeeling, unexperiencing machine-like organisms. Anil’s “real problem” aims to explain, predict, and control the phenomenal properties of consciousness, and his hope is that, by doing so, the hard problem of consciousness will dissolve much like the mystery of explaining life dissolved with lots of good science.







Anil’s account of perceptual consciousness, like seeing red, is that it’s rooted in predicting our incoming sensory data. His account of our sense of self,  is that it’s rooted in predicting our bodily states to control them.



We talk about that and a lot of other topics from the book, like consciousness as “controlled hallucinations”, free will, psychedelics, complexity and emergence, and the relation between life, intelligence, and consciousness. Plus, Anil answers a handful of questions from Megan Peters and Steve Fleming, both previous brain inspired guests.



Anil’s website.Twitter: @anilkseth.Anil’s book: BEING YOU A New Science of Consciousness.Megan’s previous episode:BI 073 Megan Peters: Consciousness and MetacognitionSteve’s previous episodesBI 099 Hakwan Lau and Steve Fleming: Neuro-AI ConsciousnessBI 107 Steve Fleming: Know Thyself


Transcript

Thanks for being here. A huge congratulations on the book. I know I sent this to you in email, but, uh, it was very refreshing, refreshing, really clear, and, uh, the writing style was just easy and fun to read. So nice job.  



Anil    00:03:49    Thanks for that. That means a lot. And thanks for having me.  



Paul    00:03:52    We’re going to, I don’t want to bury the lead here, but I want to ask a couple of questions just about writing the book and the book itself before we get into its contents. Um, so on a recent episode I had, uh, Steve Grossberg on and by the way, I have you, um, heard his or read his, uh, his recent tome or any of it conscious mind resonant brain. I have,  



Anil    00:04:15    I haven’t yet. I have ordered it, uh, but I have not yet read it. Okay.  



Paul    00:04:19...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:32:09</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 116 Michael W. Cole: Empirical Neural Networks]]>
                </title>
                <pubDate>Tue, 12 Oct 2021 10:36:10 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-116-michael-w-cole-empirical-neural-networks</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-116-michael-w-cole-empirical-neural-networks</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/10/art-116-01.jpg" alt="" class="wp-image-1451" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/10/michaelwcole.jpg" alt="" class="wp-image-1452" width="216" height="216" /></div>



<p>Mike and I discuss his modeling approach to study cognition. Many people I have on the podcast use deep neural networks to study brains, where the idea is to train or optimize the model to perform a task, then compare the model properties with brain properties. Mike’s approach is different in at least two ways. One, he builds the architecture of his models using connectivity data from fMRI recordings. Two, he doesn’t train his models; instead, he uses functional connectivity data from the fMRI recordings to assign weights between nodes of the network (in deep learning, the weights are learned through lots of training). Mike calls his networks empirically-estimated neural networks (ENNs), and/or network coding models. We walk through his approach, what we can learn from models like ENNs, discuss some of his earlier work on cognitive control and our ability to flexibly adapt to new task rules through instruction, and he fields questions from <a href="https://braininspired.co/podcast/54/" target="_blank" rel="noreferrer noopener">Kanaka Rajan</a>, <a href="https://braininspired.co/podcast/26/" target="_blank" rel="noreferrer noopener">Kendrick Kay</a>, and Patryk Laurent.</p>



<ul><li><a href="https://www.colelab.org/#" target="_blank" rel="noreferrer noopener">The Cole Neurocognition lab</a>.</li><li>Twitter: <a href="https://twitter.com/TheColeLab">@TheColeLab</a>.</li><li>Related papers<ul><li><a href="https://www.colelab.org/pubs/2019_ItoHearne_TiCS.pdf">Discovering the Computational Relevance of Brain Network Organization</a>.</li><li><a href="https://doi.org/10.1101/2020.12.24.424353">Constructing neural network models from brain data reveals representational transformation underlying adaptive behavior.</a></li></ul></li><li>Kendrick Kay’s previous episode: <a href="https://braininspired.co/?s=kendrick">BI 026 Kendrick Kay: A Model By Any Other Name</a>.</li><li>Kanaka Rajan’s previous episode: <a href="https://braininspired.co/podcast/54/" target="_blank" rel="noreferrer noopener">BI 054 Kanaka Rajan: How Do We Switch Behaviors?</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Michael Cole. Uh, you and I go back a few years, welcome to the  </p>



<p>Michael    00:04:01    Podcast. Thanks for having me on.  </p>



<p>Paul    00:04:04    So, uh, we, well, I say we go back a few years. Uh, it’s more like, uh, I’ve just been admiring you from afar. I guess you were one year ahead of me in graduate school at the CNBC at Pitt and CMU. And you’ve gone on to be many years ahead of me. It turns out,  </p>



<p>Michael    00:04:21    I don’t know about that. You’re, you’re pretty, I don’t know, like intellectually, I feel like there’s, I’ve been following you from afar, I guess I should say in the form of the podcast, as soon as I heard you had this podcast, I started listening. I haven’t heard all your episodes yet, so many, but I I’ve heard, but, uh, yeah, I dunno, like I can, I can see that you’ve really, uh, expanded your horizons and I’m a little jealous that you have, like the time and, and, uh, I guess, space to be having these really awesome conversations with such a variety of people.  </p>



<p>Paul    00:04:58    Well, uh, well today’s topic is about, uh, the jealousy that I have for you and what you’re doing, so  </p>



<p>Mi...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Mike and I discuss his modeling approach to study cognition. Many people I have on the podcast use deep neural networks to study brains, where the idea is to train or optimize the model to perform a task, then compare the model properties with brain properties. Mike’s approach is different in at least two ways. One, he builds the architecture of his models using connectivity data from fMRI recordings. Two, he doesn’t train his models; instead, he uses functional connectivity data from the fMRI recordings to assign weights between nodes of the network (in deep learning, the weights are learned through lots of training). Mike calls his networks empirically-estimated neural networks (ENNs), and/or network coding models. We walk through his approach, what we can learn from models like ENNs, discuss some of his earlier work on cognitive control and our ability to flexibly adapt to new task rules through instruction, and he fields questions from Kanaka Rajan, Kendrick Kay, and Patryk Laurent.



The Cole Neurocognition lab.Twitter: @TheColeLab.Related papersDiscovering the Computational Relevance of Brain Network Organization.Constructing neural network models from brain data reveals representational transformation underlying adaptive behavior.Kendrick Kay’s previous episode: BI 026 Kendrick Kay: A Model By Any Other Name.Kanaka Rajan’s previous episode: BI 054 Kanaka Rajan: How Do We Switch Behaviors?


Transcript

Michael Cole. Uh, you and I go back a few years, welcome to the  



Michael    00:04:01    Podcast. Thanks for having me on.  



Paul    00:04:04    So, uh, we, well, I say we go back a few years. Uh, it’s more like, uh, I’ve just been admiring you from afar. I guess you were one year ahead of me in graduate school at the CNBC at Pitt and CMU. And you’ve gone on to be many years ahead of me. It turns out,  



Michael    00:04:21    I don’t know about that. You’re, you’re pretty, I don’t know, like intellectually, I feel like there’s, I’ve been following you from afar, I guess I should say in the form of the podcast, as soon as I heard you had this podcast, I started listening. I haven’t heard all your episodes yet, so many, but I I’ve heard, but, uh, yeah, I dunno, like I can, I can see that you’ve really, uh, expanded your horizons and I’m a little jealous that you have, like the time and, and, uh, I guess, space to be having these really awesome conversations with such a variety of people.  



Paul    00:04:58    Well, uh, well today’s topic is about, uh, the jealousy that I have for you and what you’re doing, so  



Mi...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 116 Michael W. Cole: Empirical Neural Networks]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/10/art-116-01.jpg" alt="" class="wp-image-1451" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong>.</p>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/10/michaelwcole.jpg" alt="" class="wp-image-1452" width="216" height="216" /></div>



<p>Mike and I discuss his modeling approach to study cognition. Many people I have on the podcast use deep neural networks to study brains, where the idea is to train or optimize the model to perform a task, then compare the model properties with brain properties. Mike’s approach is different in at least two ways. One, he builds the architecture of his models using connectivity data from fMRI recordings. Two, he doesn’t train his models; instead, he uses functional connectivity data from the fMRI recordings to assign weights between nodes of the network (in deep learning, the weights are learned through lots of training). Mike calls his networks empirically-estimated neural networks (ENNs), and/or network coding models. We walk through his approach, what we can learn from models like ENNs, discuss some of his earlier work on cognitive control and our ability to flexibly adapt to new task rules through instruction, and he fields questions from <a href="https://braininspired.co/podcast/54/" target="_blank" rel="noreferrer noopener">Kanaka Rajan</a>, <a href="https://braininspired.co/podcast/26/" target="_blank" rel="noreferrer noopener">Kendrick Kay</a>, and Patryk Laurent.</p>



<ul><li><a href="https://www.colelab.org/#" target="_blank" rel="noreferrer noopener">The Cole Neurocognition lab</a>.</li><li>Twitter: <a href="https://twitter.com/TheColeLab">@TheColeLab</a>.</li><li>Related papers<ul><li><a href="https://www.colelab.org/pubs/2019_ItoHearne_TiCS.pdf">Discovering the Computational Relevance of Brain Network Organization</a>.</li><li><a href="https://doi.org/10.1101/2020.12.24.424353">Constructing neural network models from brain data reveals representational transformation underlying adaptive behavior.</a></li></ul></li><li>Kendrick Kay’s previous episode: <a href="https://braininspired.co/podcast/26/" target="_blank" rel="noreferrer noopener">BI 026 Kendrick Kay: A Model By Any Other Name</a>.</li><li>Kanaka Rajan’s previous episode: <a href="https://braininspired.co/podcast/54/" target="_blank" rel="noreferrer noopener">BI 054 Kanaka Rajan: How Do We Switch Behaviors?</a></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Michael Cole. Uh, you and I go back a few years, welcome to the  </p>



<p>Michael    00:04:01    Podcast. Thanks for having me on.  </p>



<p>Paul    00:04:04    So, uh, we, well, I say we go back a few years. Uh, it’s more like, uh, I’ve just been admiring you from afar. I guess you were one year ahead of me in graduate school at the CNBC at Pitt and CMU. And you’ve gone on to be many years ahead of me. It turns out,  </p>



<p>Michael    00:04:21    I don’t know about that. You’re, you’re pretty, I don’t know, like intellectually, I feel like there’s, I’ve been following you from afar, I guess I should say in the form of the podcast, as soon as I heard you had this podcast, I started listening. I haven’t heard all your episodes yet, so many, but I I’ve heard, but, uh, yeah, I dunno, like I can, I can see that you’ve really, uh, expanded your horizons and I’m a little jealous that you have, like the time and, and, uh, I guess, space to be having these really awesome conversations with such a variety of people.  </p>



<p>Paul    00:04:58    Well, uh, well today’s topic is about, uh, the jealousy that I have for you and what you’re doing, so  </p>



<p>Michael    00:05:04    Let’s kill it.  </p>



<p>Paul    00:05:05    Yeah. Focus. Let’s focus on that. So back when I knew you, um, you were a cognitive control guy back with Walt Schneider or early on, but, uh, I’d like to just, um, pick your brain about how you see your sort of career trajectory and, um, alongside that, just how your interests have changed, uh, over time.  </p>



<p>Michael    00:05:28    Um, yeah, sure. So how far back should we go? Um, I actually got into cognitive control in Mark D’Esposito’s lab at UC Berkeley. That’s where I went to undergrad and I didn’t know much about it. Uh, when I volunteered in that lab, I mean, I, I learned about it in class. Um, so I was a cognitive science major, so that was some multidisciplinary major at UC Berkeley, um, and actually out with more interest in psychology and computer science. And then I was forced to take these neuroscience classes. I ended up shifting my interests toward sores there. So I had this kind of computational and, you know, cognitive psychology kind of bent to my interest in neuroscience early on. And I volunteered in Mark D’Esposito’s lab. Um, so full-time RA for a little bit and then started working with Walt Schneider. And, um, while I was there, I got more and more into computational topics.  </p>



<p>Michael    00:06:34    Um, but kind of like indirectly, I guess I’ve been reading papers and building little models for a long time, but not publishing much on that, but it’s really shaping my and shaping my thinking over the years. Um, and so, uh, while it was there, um, so while, um, back in the 1970s, I had, uh, come up with this controlled versus automatic processing dichotomy that really influenced things a lot. And so you would talk a lot about the real basics of like, what is controlled processing, what’s automatic processing. And, um, out of that, I realized, um, there hadn’t been much exploration of, one of the definitions of controlled processing is novel task performance. And so that led me into rapid instructed tasks learning, which is there wasn’t much for almost any work done at the time when it first started thinking about it and talking to Walter about it. And so, yeah, that ended up being my dissertation. So that’s technically, you know, almost by definition conduct control is just a different, it’s not like conflict kind of stuff is typically talked about, um, with kind of control or  </p>



<p>Paul    00:07:44    So what, we’re going to talk a little bit about, um, riddle, rapid instructed task learning. So what is that? And, you know, in the, in the sort of big picture, uh, what have you found, and I know that it has stayed with you throughout your career,  </p>



<p>Michael    00:07:59    Right? Yeah. So riddle stands for rapid instructed tasks learning and, uh, it’s something that we actually do all the time in everyday life. So for instance, like playing a new game that you’ve never played before, like monopoly, there’s maybe some rules that someone tells you about the game, and then you can rapidly integrate those altogether and play this game. And it may sound kind of trivial because, you know, I’m using a game example, but it’s really in everyday life, we do all sorts of things like cooking new recipe, um, or use new technologies. So it’s not just about being able to understand the words it’s about being able to transfer previous knowledge into new contexts. So you get a new smartphone, you don’t have to start from scratch and kind of do this trial and error learning. You can transfer knowledge that you already had and also, um, have the instructions kind of prompt you on what kind of transfers to have, um, different kinds of information that you’ve actually learned before could be relevant in a new context. And it’s really interesting to me, both from a computational perspective, and also just in terms of, I guess, I guess contrasting humans with machines and humans versus animals, um, the animals can’t do it. It’s up for it turns out I ended up adding this with a figure, um, to my dissertation. Cause I thought it was interesting. There’s one, uh, bonobo, chimpanzee named Kanzi who can do it. So it’s possible in animals  </p>



<p>Paul    00:09:34    Not to sign right?  </p>



<p>Michael    00:09:35    English words, English words for simple little tests, English words. Yeah. He’s a, he’s like a genius Chimp. So, so it’s possible, but yeah, there’s just like one genius animal that can do it. Um, pretty much.  </p>



<p>Paul    00:09:50    So the idea right is to have a massive set of different possible tasks that an organism or a machine could perform. And then you instruct, uh, whatever task to perform. You instruct it. And this happens just kind of interleaved, right? Where you say, do this task, all right. Now do this task. Now do that task. And the idea is you have to be able to switch between them, which takes a lot of cognitive control.  </p>



<p>Michael    00:10:15    Yeah. I mean, th there is a distinction between the theoretical like topic or construct or however you want to put it of rental, which is just things that we do every day. Like, um, I dunno, like get directions to go to a new grocery store or something, or, or kind of arbitrary things you could have people do. Like, um, I dunno, sing the national Anthem while jumping on one leg or some things that you’ve never done before that you could clearly do immediately. There’s this whole set of them, but there’s of course limits to that. And so the problem was how do we translate that into an actual systematic way to research it empirically? And so that’s where this kind of a paradigm that I developed with Walt started from an idea is this, uh, cognitive tests that we came up with to investigate riddle systematically.  </p>



<p>Michael    00:11:12    The idea is that we have these little tasks with three rules each. And the key is that we want them to be kind of arbitrary and complex enough that we were really sure that participants haven’t actually done them before. So they’re novel, right? Uh, there’s something to be learned, but we also want them to be learnable very rapidly, uh, for humans. And so we have, uh, an example here would be both vertical left index. If those are just beep cues that you’d see on the screen. And then what that means is if both stimulate your vertical press, your left index finger, so then two stimulator would come up in this case, there are these vertical or horizontal bars. You see two vertical bars in this case, in this example. And so the answer would be true and you’d press your left index finger. And so we can swap out these different stimuli and different rules.  </p>



<p>Michael    00:12:05    Um, so another example task would be neither red left index. So that means if neither stimulus is red, press your left index finger. And so you’d press, uh, and if it’s not true, you’d press your left middle finger. And then so you see like a blue vertical bar and a red horizontal bar. So it’s not true that neither seamless is red. So you’re pressed here, your left middle finger in that case. And so, uh, you know, it’s not super easy to do, but it has fins can do this on the first try if you can believe it, uh, about chance. And that key is that we’re moving across, you know, think of it like a state space, right? You have all these different combinations of rules and it’s systematically, we’re systematically traversing it cognitively, right? In terms of information processing. And we want to be looking at the brain while that’s happening and you can see the brain updating and the systematic way we can do all sorts of things, like look at changes in activity patterns and the functional connectivity patterns with task, state, functional connectivity. And then that’s, um, led to, uh, what we call the flexible hub theory that we’re really testing. And some of these papers where, um, we have these kind of control networks that, um, are really highly distributed. And we’ve found evidence that there have hubs in them that update global processing systematically as you perform different tasks, like this,  </p>



<p>Paul    00:13:29    Say a few more words about flexible hub theory, because I’m not sure that we’re going to be talking about it a ton, but I think it’s a neat and important. So can you just talk about that a little bit more about what it is and what you found?  </p>



<p>Michael    00:13:41    Yeah, so the flexible hub theory is really building on some older theories. Uh, uh, one is called the, uh, guided activation theory by Miller and Cohen. So that was back in 2001 and it actually got, it goes all the way back to some artificial neural networks and the early nineties, they were really focused on lateral prefrontal cortex and, um, how they represent their, it represents contexts or tasks, rules. And, uh, so what we’ve done with this theory is really expand that to entire kind of control networks, so much more distributed. Uh, we’re also emphasizing global brain connectivity or hub newness. Um, cause we’re really thinking about in a context of something like riddle, you need to be able to rapidly update your global information processing. And so how is that going to happen? How are you going to even coordinate that sort of update? And so there’s a lot of evidence from neuroimaging that these kinds of control networks are involved in that sort of thing.  </p>



<p>Michael    00:14:38    And also from lesion studies, uh, from neurology and neuro-psychology. Um, so if you ablate these regions, you have major problems with things like Rydell and, uh, fluid intelligence generally. Uh, so that’s one thing. So we’re really emphasizing the connectivity here. Um, there’s also flexible connectivity. So we’re looking at task state functional connectivity, uh, how it updates, um, uh, the connectivity, how the brain regions interact update, and then finally a more of a computational property is called compositional coding. So the idea is that you don’t just totally update with new connections and new activity patterns. Every time you do a new tasks, you actually reuse, uh, activity patterns and connectivity patterns that you’ve done before. And the kind of the kind of format of the pro paradigm lends itself to this, right? Cause we’re reusing different rules in different contexts, making new tasks sets that have never been performed before. And you can see that a lot of the same information patterns are being reused when we actually look at the connectivity and activity patterns. So altogether, um, yeah, that, that kind of builds this theoretical framework that we call the flexible hub theory.  </p>



<p>Paul    00:15:53    All right. Well take a deep breath because I’m going to play you the first guest question here. Yeah. Well, because it’s, it’s, it’s from the rapid instructed task learning era of your life. And I think you’ll recognize who it is. I’ll, I’ll say who it is after I, after the question.  </p>



<p>Speaker 4    00:16:12    Hi, Mike Patrick here, big fan of your work ever since graduate school at the centers for neuroscience and the neural basis of cognition, Pittsburgh. Wondering if you could share ideas on how riddle could be used to study a couple of phenomenon on the one hand free will, which Al narrowly define as the emergence of an instruction rather than an explicit given instruction through language perhaps synthesized by activity and another brain region, or even a neuromodulatory process. So not quite a random goal, but rather an intended goal do free will representations like the decision to go to a particular goal or perform a particular action look like commanded instructions. And on the other hand, stubbornness or a steadfastness, the preservation of instruction or goal in the face of distracting, disruptive, or even goal related inputs, do those representations look similar to the commanded representations?  </p>



<p>Paul    00:17:26    All right, Mike, we’ve only got three hours to do this. So that’s Patryk Laurent, uh, an old friend of both of ours, who is the director of emerging technology at DMGT, which is a British, private holding company. All right.  </p>



<p>Michael    00:17:46    Starting  </p>



<p>Paul    00:17:46    With an easy one, right. That’s why I wanted to introduce riddle because,  </p>



<p>Michael    00:17:56    Um, so yeah, there are a few things I could say about that. Well, one is that we could think of our ability to flexibly switch our goals into novel scenarios as like on a continuum with red over. So the way we’ve been studying it is just kind of taking the shortcut for, uh, experimental convenience of giving the instructions and just having the participants like do this task correctly or incorrectly and systematically explore the space. But, uh, I have thought quite a bit about what if we had the participants select their own tasks or explore some space of tasks and then I guess, yeah, that would open things up to more for your will. There is a literature on task switching. So there’s a relationship between all this stuff and task switching. So those would be between two familiar tasks, um, uh, as opposed to a novel task.  </p>



<p>Michael    00:18:55    And there’s a whole literature on, if you let the participant choose whether to switch to the other task or not, and then there’s different brain responses to those two things. And, you know, it’s not my exact area, so I can’t really describe what the differences are in detail, but, um, uh, they tend to be from what I recall and what we call cognitive control network. So there’s some kind of additional control processes that are involved in making that decision. Um, yeah, I don’t, I’m not sure what their, what was the second part of the question? It was, uh, it was a very, uh, stubbornness, right? Huh? Yeah. I mean, there’s a whole literature on, uh, perseveration with, uh, that’s related to cognitive control. So it’s the inability to switch to switch the current rule that you’re supposed to be applying in a given task. It kind of gets stuck in one state.  </p>



<p>Michael    00:19:58    So I don’t know, I, I kind of wonder if, uh, stubbornness might be actually default and you need conduct control and higher order cognition. That kinda jump out of that, unless there’s some kind of stimulus driven reward thing that would pop you out of the state, but yeah, kind of, I guess I’m trying to read between the lines. I think Patrick might be asking about self-instruction kind of, and, and I, yeah, I think that is pretty compelling. Um, and maybe like an evolution when we evolved the ability to, um, represent is kind of task instructions in this really flexible way. We’ve probably got both some ability to, uh, think of novel tasks and perform them and also be instructed from others. Um, either through imitation or language yeah.  </p>



<p>Paul    00:20:53    With the rapid instructor task learning. I mean, like you already said, you create these artificial scenarios where you have to, um, whereas really instruction dependent and there’s a lot of control having to even understand the instructions and put them together. So, so free will right. Internally generated internally motivated, uh, you know, whatever free will really is, but that internal, um, self organ, you know, wherever it comes from, some sort of self-organization process where it’s generated internally, internally, um, I don’t know how much do you think that it would overlap? Um, network-wise with like the riddle types of networks,  </p>



<p>Michael    00:21:35    I guess I’m kind of thinking of there’s, there’s some set of mechanisms that, um, you know, the language network would, uh, interact with and these kinds of control networks sometimes about pre lateral prefrontal cortex, uh, poster, product cortex, um, mid simulate, pardon me, mid singular cortex. Um, and what would happen in the case of instruction? Uh, external instruction would be through, you know, auditory cortex, the language court, uh, the language network, and then to kind of control network. And then maybe, I don’t know where it would start maybe orbital frontal cortex or something where you’re representing, you know, the reward that you might receive. If you do this other thing, it might drive selection of, of a set of task rules or strategies for getting those rewards. Yeah. I mean, freewill is a tricky thing. Like, um, obviously, but I guess my own, you know, as a neuroscientist, I think most neuroscientists, I don’t want to speak for everyone, but, um, there’s this sense that I get when I speak to neuroscientists about freewill is that there is no freewill or there’s it’s it’s, uh, mechanisms all the way down that, you know, are, are largely determined just by one’s history and, and physical reality.  </p>



<p>Michael    00:22:57    And so I don’t think maybe Patrick continued to as to go down that rabbit hole that deep, but like, um,  </p>



<p>Paul    00:23:04    Oh, I bet he did, but  </p>



<p>Michael    00:23:06    He did bring it up pretty well, but yeah, I guess I would say that, um, yeah, it’s probably gonna come down to, uh, reward predictions for selecting the goal, like, uh, and maybe the tasks that you want to perform. If you have like free reign, there’s also this funny thing, a lot of the freewill experiments will do, or they they’ll give you like two choices and then you just kinda can randomly, or like the Benjamin Libet experiment where you just press a button whenever you want. There’s some, uh, they’re called demand characteristics in the literature, but like the task is to, you know, press the button whenever you want. Well, like if you really ask the person, do they even want to press the button? Like if they really were free, they’d just like walk out of there and do something more fun than that. So what they’re really doing is forcing you into this situation where you need to decide if you’re gonna press a button and then you’re sitting there and you’re like, well, I kind of don’t like that say, you didn’t even want to press the button at all, but you’re like the social pressure to press this button.  </p>



<p>Michael    00:24:13    So I guess I’ll press it every once in a while. And so there’s, there’s some little element of freewill, but it’s also just like, okay, I have to, you know, I have all these constraints on my behavior actually. And then there’s also this sense of like, it’s gotta appear random, which is not normal for people to be random. So  </p>



<p>Paul    00:24:33    Right. We’re not random. I hadn’t heard that there had been other criticisms of the, of the Libet experiments and we don’t need to go through all that because we need to move on. But I hadn’t heard that one that, that, um, you are constrained to that particular response. And so that in itself is a bit of a con found, I suppose, for free free will. I mean, so most of the, most of these solutions to free will, are really reconceptualization of the concept of, of free will. Um, so it’s almost like moving the goalposts, but on the other hand, the free will that we commonly want, right. This, um, the sovereignty over all of our actions and thoughts, uh, I think everyone agrees that that doesn’t exist because it, well, most people agree that that doesn’t exist because it requires some sort of quantum indeterminacy. And then we have to like somehow be in that nexus and be responsible for our own behaviors, you know, or something like that. Right. So anyway, most of the solutions that I see re reconceptualize it rightly so I think,  </p>



<p>Michael    00:25:39    Yeah. I mean, we should move on, but I will say that I, I do think we have pretty much the kind of freewill that we actually do on it’s just that I think of it as, as long as who I am myself representation, which is in my brain is properly controlling or influencing how I behave and what goals I pursue. Then that’s the kind of freewill that we want. It’s just that who we are is determined by genetics and our past. Right. And with that, that’s my conceptualization of it.  </p>



<p>Paul    00:26:15    Yeah. All right. Very good. Well, let’s back out, so thanks, Patrick. So thanks Patrick. I’m going to go ahead and play another question because, um, it has to do with MRI. So, and then this will bring us up to speed with the work that we, that, uh, you’ve done that we really want to talk about because a lot of your empirical work is, is based on MRI measurements, right? How you construct these networks that we’re going to talk about. So let me just play this question for you.  </p>



<p>Speaker 5    00:26:46    Hey Mike, this is Kendrick Kay, and I have a question for you. It’s sort of an open-ended question. So really any remark you have along these topics would be appreciated. Uh, I was thinking about your sort of research and your focus. Um, I mean, obviously you use a for Mariah is a measurement technique and you are thinking about computational models of cognition. Um, so I guess my question has to do with, uh, the limitations or your wishlist, maybe so to speak, uh, for neuro imaging, uh, I guess specifically Fri um, in terms of informing your, uh, network models of computation, what, what, what, what do you desire out of FMRI or what do you see are the current limitations? Uh, for example, is it spatial resolution? So obviously FMRI is limited, at least in compared to single neurons or multiunit activity type recordings. And, you know, for Mariah is trying to push the limits there and trying to get higher and higher resolution, but it’s, it’s from practical sense pretty far away from small populations of neurons.  </p>



<p>Speaker 5    00:27:49    So do you feel that’s a major bottleneck to say developing the types of network models that you do, uh, or alternatively things like artifacts and head motion, and whether you think that’s a major problem and whether, you know, that’s limiting, uh, your progress in the type of research that you do. So I would just be curious to hear your thoughts on sort of whether you’re worrying about currently, uh, trying to make FMI better either in terms of the raw acquired data and or the analyses that we can make of it. Uh, again, with the ultimate goal of trying to inform, uh, what we can learn about computation in the brain.  </p>



<p>Paul    00:28:28    So he, he mentioned your network coding models. I think he just called them network models. And we’re going to talk about those, the types of models that you build. So if you feel that that was Kendrick Kay by the way, uh, if you feel like you need to explain those two to answer those questions, go for it. Otherwise we can hold off and you can answer the, uh, the, uh, your wishlist for MRI.  </p>



<p>Michael    00:28:54    So, yeah, I, I have been, I’ll say this. So in general, in my career, I had these kinds of oscillations of, um, pessimism versus optimism. And so, um, it’s actually pretty useful because when I’m in one of the other states, I’m maybe overly optimistic, I’ll think back to like my pessimistic views. So at various times I’ve been quite pessimistic about SMRI and then other times I’m quite optimistic. And I have to say overall, I’ve learned a lot more about computation from FMI than, uh, my most pessimistic phases and the things that frustrate me the most about Fri at this point, I think has to do with the temporal resolution, because these things have, uh, these network models, which, um, we can get get into in a bit, I’ve really come to the conclusion that we need to understand causal relationships between neural populations. And that’s going to be the key.  </p>



<p>Michael    00:30:01    And, uh, temporal information is very useful, right, for making causal inferences. Um, but it’s not the only way, and it’s not the only piece of information, but it’s quite useful. So I actually have done, uh, a bit of work with Meg and, uh, started doing more UGI work high-density EGF because of course it’s the opposite problem. There were more frustrated with the poor spatial resolution. Um, and then, yeah, my, uh, frustration has led me to, uh, work with some non-human primate data sets recently, um, for multi-unit recording, not, you know, collecting the data in my lab, but, but really, um, in terms of theory and method development, that kind of thought experiment that led to a lot of this stuff, we’re going to talk about that network models is, uh, was from actually thinking about the perfect kind of neuroimaging technique. If we could record all neurons in real time, what would I do with that?  </p>



<p>Michael    00:31:09    Right. And so that’s actually keeps coming up again and again, where I’m just like, oh, well, you know, I want to, I wish I had that. Here’s what I actually have. How do I make do, and try to get closer to that? Would you know what to do with that? I, I, I think so it’s one of those things where you don’t know for sure until you go and try it, it’d be probably too, too much data. I’d have to do data reduction, to be honest, but, but what would be cool about that is you could pick your data reduction based on theory or something and get ensembles and, and, and do a different analysis or get different ensembles or something like that. But, but yeah, I mean, I guess we get into the network model approach, and then I thought about it pretty abstractly, pretty abstractly with doing these thought experiments about like, what would, if it was real like spiking data or LFPs or something.  </p>



<p>Michael    00:32:11    But the idea is that we have these artificial neural networks and we want to, with this, these algorithms that can dictate how the dynamics play out on a network architecture. You know, if we had the data, uh, we would go in and take the, all the detailed connections between all the neurons and maybe we could simulate those dynamics on that network. Now, since we don’t have that sort of data, and especially in humans, we are using FMRI and that’s given us, you know, somewhat decent spatial resolution compared to something like parameter G, um, and, um, not great temporal resolution, but we have a lot of tricks up our sleeves for making do. Um, and the main trick is really experimental control. So you can control the timing of stimuli and responses and so forth. So you can separate different neural events from each other. And then there’s a lot of useful connectivity techniques.  </p>



<p>Michael    00:33:20    We could use structural connectivity we could use, but we typically use functional connectivity and we use, uh, specifically resting state functional connectivity. And the way I think about that might be a little, I think it’s different than a lot of people think about it. I should ask, make it a little survey and ask how people think about this, but I think of resting state connectivity as, um, as if you, it’s almost like if you can just inject noise into each, uh, neural population and see what happens downstream and, um, but you know, it’s spontaneous activity. So it’s kind of like just, we’re looking at the effects on this statistics of the signal from just these spontaneous activities flowing between different neuro populations. And then that gives us a sense of what’s called intrinsic functional connectivity. And what we found was that it’s really similar across a bunch of different brand states. So resting state isn’t necessarily that special. It’s just that, um, you’ve removed some compounds that it might be coming from test stimuli, and maybe it’s closer to something like structural connectivity with maybe the synaptic weights kind of influencing things. That’s one thing I like about it too. Relative districts for connectivity, we might, we might be getting closer to the actual, well, ultimately causal influences between them, but it’s, it’s really hard to make strong causal claims.  </p>



<p>Paul    00:34:55    All right, Mike, well, I’ve already, you know, I’ve buried the lead already, but, um, I thought that those two guests questions were sort of Mo would fit better toward the beginning. So, and in fact, I don’t really know exactly where to start because so what you’ve done in the paper is, uh, to take functional connectivity data. And in contrast to training up a model made up of some sort of architecture and otherwise fairly random units and random connectivity. Um, instead you guys have built models and used, uh, functional connectivity data to decide the architecture and also decide the weights, the nodes. So you don’t train the model. There are sort of two axes that, uh, we could talk about. One is the difference between, uh, your approach and the deep learning Jim DeCarlo, Dan Gaiman’s type approach. And also with recurrent neural networks where you, you train the network on a cognitive task, like you would train an organism on a cognitive task and you optimize the network and then you compare the network to your normal recordings or however you’re recording data. Uh, the other way, the other access that we could talk about, and I’ll let you decide what you’d like to, how you’d like to introduce the network. Coding models is, uh, this play between encoding and decoding models, which we’ve talked about on the podcast, uh, about a long time ago. And it would be good to refresh people’s memory and then use that to, to talk about what network coding models are. So do I, is that a fair enough summary? Yeah.  </p>



<p>Michael    00:36:32    Yeah. So we’re trying to kind of bypass the whole, like, uh, what’s the right learning role. Uh, how do you update the weights in these networks to just say, let’s go look, let’s go look in that human brain or, or even animal brains work too, if you had the right data and then just parameterize the network that way. Um, and then, so there are lots of different things that we are using it for and thinking that it’s useful for, um, one is just while we can go and like test these artificial neural network theories, um, because instead of just doing the same thing of optimizing for task performance, we can go and see what, whether the weights that are there from functional connectivity in the brain, whether the performance or the cognitive effects of interests will just emerge when you go ahead and simulate these things.  </p>



<p>Michael    00:37:34    And so it, it actually, they use the word emergence, which has been, I really think that’s what we’re doing, but I know there’s a lot of philosophical baggage with that term. So I started using the word, like just generate the cognitive process of interest, but it’s really the same thing. I mean, emergence in a very simple sense of like, you know, the property of going 60 miles an hour down a highway, you know, emerges from these mechanisms in the car that we understand it. It’s not some sort sorta like, uh, a very mysterious thing, but  </p>



<p>Paul    00:38:10    Yeah, I’ve been trying to say emergent properties just because it sounds less, uh, less like strong, magical images, but I don’t know how to turn the emergent properties into a verb. So  </p>



<p>Michael    00:38:25    Emerging  </p>



<p>Paul    00:38:25    Properties, I guess.  </p>



<p>Michael    00:38:27    Yeah. I’m trying to think. So there’s that you can test use these models to test, uh, these theories. We can use them to make sense of the neural data. So if you have a connection, when you say, oh, I think, you know, this connection is important or, you know, it’s connecting these two regions. You can have all these different ideas about what it does or is four. But when you build one of these models that will literally have these, what we call activity flows over those connections, and you can go and see, and you can even lesion inside the model and see like, oh, what did it do downstream? And the same goes for the activation. So the classic neuroimaging approach of just saying like where in the brain is this kind of the process you do that? And it’s like, yeah, you learn something, but then, you know, well, what does that activity do mechanistically it’s not really clear.  </p>



<p>Michael    00:39:22    And there’s typically, you know, there’s all this kind of hand wave and trying to interpret it, including in my own work. Right. Cause you’re trying to make some bigger narrative here that you understand what’s going on, but you know, if you actually link it up with connectivity, then you can say, well, oh, this, this activity or plausibly, you know, influences activity over here. And then that could lead to motor responses and behavior. And so you, you start to maybe have something like an integrated understanding of what’s going on. It’s not as easy as all that, of course, like I said before, the, uh, you know, I think it comes down to causal inferences and causal inferences are super hard, but I, I think a lot of, um, a lot of people have kind of given up on, uh, causality and they’ll just use correlations. We’ve been trying to kind of move past correlations, um, for the connectivity estimates because of this. And, and we have made some progress towards more Causely valid estimation, but, um, yeah, there aren’t perfect causal inferences. So we’re always pushing towards a, you know, more valid measures and I’m trying to make clear inferences here, but it’s like, I think it’s a good starting point. I think we are learning a lot and it’s just a matter of, you know, keeping on and, and advancing the methods while we’re advancing that theory and kind of making a nice feedback with them.  </p>



<p>Paul    00:40:51    Uh, how, how I mentioned that, like the Jim DiCarlo were using convolutional neural networks to study, uh, the ventral visual stream, right. And object recognition. How do you think of a network coding models in relation to that? Because, you know, one of the strengths of convolutional neural networks, which of course were inspired by the visual system already from way back with Fukushima and, uh, you know, through Yann LeCun, um, and now those models are the quote-unquote best predictive models for brain activity in those regions. And they were roughly modeled there. They were built to, uh, recreate roughly the hierarchical layers within the ventral visual stream, um, both in sort of their, their magnitude, the size of each layer. And of course their, their ordering. So, um, how does your approach differ and how do you, how do you think about what you’re doing relative to that kind of approach?  </p>



<p>Michael    00:41:49    I’d say that our approach is probably more empirically constrained because we not only have the activity patterns that were constrained by quote unquote constraints, both as like holding us in, but also telling us what, how the brain is computing things, right? So it’s constraints also in a good way, um, that we have the activity patterns and the connectivity patterns. And so if it works, if it predicts well on, on each layer, say we haven’t done this, the visual model with the multiple layers, but that would be interesting if it actually did work, then we would say, um, well, w maybe we understand more directly how the neural populations are interacting because there’s actual empirical constraint and the connectivity, if you don’t have that, then it’s an optimization problem. And there’s a lot of different solutions that would lead to the same predictions without saying, that’s actually how it works in the brain.  </p>



<p>Michael    00:42:52    Of course you’d have more constraints than we have, and that would be nice. And then even more confident, right. That this is exactly how it works in the brain, but that the point is right, that these key constraint of like, here’s actually how the neuro populations interact with each other is in there. And, um, that could allow for emergence of things, you know, the generation of processes that we don’t even think of, uh, because, you know, if it is really how the brain is, is working, uh, then you know, you put in maybe some stimulus that the person you’re modeling from, I guess you could take, uh, one individual, or you can take group data from F MRI or whatever, modality and parameter as the same. Maybe that person has never seen that stimulus before. And you’d see what happens in the model that would be interesting to see would it, and then you actually take that person on and have them see the stimulus.  </p>



<p>Michael    00:43:50    Would it do the same thing, but yeah. Um, I think they’re both super useful approaches, but, uh, there’s this added something about the inferences you can make and then there’s this like potential for yeah. Something that the connectivity is doing, maybe evolution specified it, there’s some kind of, uh, some kind of bias and the connectivity weights that does something, a model that’s optimized for the particular stimuli that were presented during training. Maybe they wouldn’t be optimized in the same way. Maybe it’s from the person’s development or experience that the connectivity weights might have been biased a particular way maybe to generalize better. So, yeah, there are a lot of questions like that. That would be really interesting. Yeah. It’d be really interesting just to compare, compare them. The thing is though, because it’s not optimized for the task performance, it’s probably going to do worse just because there’s noise in the data.  </p>



<p>Michael    00:44:51    Right. Like even if we had perfect data, then I would think it would do better just because humans have a ton of training and have evolution, like, um, setting things up for optimal performance to some extent. But, um, but yeah, there, there is this idea that we haven’t actually explored yet, but of also just starting with connectivity and then training on top of that. Um, so that, that could be interesting too, right? Like maybe it would speed up training, let’s start from the connectivity and maybe push the model in a certain way. One reason that we’re really excited about this activity flow approach and the whole, uh, ENM, uh, approach is applications to mental health and brain diseases. So we actually had a paper come out recently and science advances that looks at schizophrenia, and we build these little where you could think of as simple computational models that predict activity during a working memory task.  </p>



<p>Michael    00:45:53    And what we’ve found is that we can predict the abnormal activations during the working memory tasks and schizophrenia patients. And it’s also predictive of their working memory performance and how they have this deficit and working memory performance. And we took it, uh, just to kind of illustrate the power of these kinds of models is we actually took it a step further and made a treatment, uh, kind of hypothetical treatment that if we could get in and change the connectivity, however we wanted what would happen. And, uh, we have this machine learning algorithm that predicts from the healthy individuals and the patients, what their working memory memory performance would be. And once we implemented this hypothetical treatment and applied the activity flow algorithm to generate what activations would happen, uh, in the context of this treatment, we actually predict a 12% increase and the working memory performance, which puts the patients just about and the normal range. And so, yeah, we’re, we’re excited about, well, I mean, that’s illustrative of the power of this kind of approach for real-world applications potentially. And, and so we’re excited just about, um, the potential for that, but also, um, we, we don’t want to actually start to learn how to change connectivity, systematically and other, uh, research so that we can actually go and test this stuff.  </p>



<p>Paul    00:47:16    So I’m really, I’m really glad that there are people like you that are working on these things because diseases, uh, are super important. And they’re not something that I ever cared about in my research. So, but I know that that’s like sort of the point. And so, uh, it’s really great. It’s really great that you’re focusing on that now. Now I’m going to have to go ahead and play, uh, our last guest question. I think that this is a good time, although, um, you know, I was just talking about these convolutional neural networks, obviously something that, um, you’ve worked on as kind of having like recurrent neural networks and setting them up in an architecture so that they are talking to each other, like different brain areas would talk to each other and where you can go and perform multiple tasks. And we can come back to this idea of multiple tasks. Um, but you just saying that you’ve been thinking about training on top of the, uh, functional connectivity models. Um, made me think of this next question. So final guest question here from your coauthor, one of your co-authors.  </p>



<p>Speaker 6    00:48:19    Hi. Um, thank you for asking my opinion. I’m always happy to chat. Um, first of all, Mike is great. He and I coauthored a review on multi-tasking learning, um, in RNNs with Robert Yang a few years ago. Um, and too, this is such a clever paper. Um, one of the many holes in the field of computational neuroscience in my opinion, is that there aren’t too many models of RNNs based on human data, fMRI data in particular. Um, Mike is one of the few people thinking deeply in the space, um, and, you know, selfishly, I hope to be working alongside him. Um, again, scientifically, uh, both of the approaches, you know, using connectivity motifs in for, from fMRI in a generative, um, sense in neural network models like Mike does in this paper, uh, Mike and his team and training RNNs based on time series or dynamics data directly, uh, like I do, and inferring from the second type of network models, connectivity, motifs, I think both of those approaches are perfect complements.  </p>



<p>Speaker 6    00:49:20    Um, the two types of models should also be able to work as constraints for one another. And the reason I’m asking, um, well, the reason for my question is, you know, functional connectivity is often inferred. You’re saying, you know, network analysis or graph theoretic methods on the covariance matrix of time series data. Uh, we just, you know, the, you know, end by an object for an units or an voxels, um, now in types of networks that I build and train, um, to match units activity to time series directly such a covariance matrix should come along for free. You’ll see, cause every neuron or every voxel is kind of being fit. Uh, but in addition, in my type of models that recurrent weight matrix should also be dynamically stable and should work. And you should be able to find one, even if the underlying distribution were to change over time as it does in the brain.  </p>



<p>Speaker 6    00:50:13    So if you buy both of these things that I said, um, by knowing just the initial condition, we should be able to use this recurrent weight matrix from an RNN Fitch to dynamics in generative sense also, you know, almost as if it were hooked up to an actuator. Um, so my question to Mike would be, you know, when would this approach, um, in his opinion work or fail, and, you know, when I say work, um, I want that to mean to capture dynamics and maybe some features of behavior. And also, um, how would this depend on task complexity and the number of tasks being performed? Um, now I haven’t obviously shown any of this directly yet, or at all for human data, but, you know, I really would like Mike’s thoughts on these and then also, you know, would he please work with us, um, on this problem? Thanks, Paul.  </p>



<p>Paul    00:51:05    All right. Kanaka Rajan. So why did you get all?  </p>



<p>Michael    00:51:09    Yeah, I don’t know if I got all, all of it, but it sounds awesome. And an invitation  </p>



<p>Paul    00:51:16    For, for  </p>



<p>Michael    00:51:16    Collaboration I’m flattered. This is, um, yes, I would like to work on that. I’ll say that, uh, let’s see. Um, some problems I worry about are relevant here, worry about this type of approach. And I, um, so I’m focusing on the negative, but I think it’s, it is awesome. And so I’ll say that upfront, that isn’t a really good way to go. I think that things I worry about, uh, so the limitations of fMRI with the temporal resolution in particular, uh, so the, the kind of recurrent dynamics are, uh, going to be difficult to pick up when you’re the neural activity is being filtered through the hemodynamic response function. And so it’ll be like, you know, event that’s a hundred milliseconds long will be spread out over 18 seconds, um, function. And you can kind of infer when it happened, but it’s, it’s a rough approximation.  </p>



<p>Michael    00:52:17    Yeah. So we could use something called deconvolution to help with that potentially a loo current in my lab is a post-doc in my lab who is currently working on exploring those and trying to validate those approaches more so that that could help, but they aren’t perfect, but there are still a lot of, there’s still a lot of constraints that are there. So, um, it’s possible that we could use that fMRI data for that, uh, fitting recurrent neural networks. Um, the other thing I worry about is model complexity, um, between say two neuro populations, there are a bunch of different functions that could equally well predict downstream. So you need to take a certain strategy for dealing with that. And, and one of our strategies has been simplicity kind of like Occam’s razor kind of approach, um, and then adding complexity as necessary. So, you know, we start out with correlation, it’s probably the simplest thing, actually covariance without normalization to be even simpler, but you know, you move up to correlation, you move.  </p>



<p>Michael    00:53:27    But then when we S we want to deal with, uh, the confounding problem and causality, so there are confounders. So it’s one region say influencing to others, you’ll make a false connection between those two others. So we use multiple regression typically to deal with that. So you fit all the time series simultaneously, and then, but then there nonlinearities, uh, which we haven’t fully gone into, but, um, we’re finding that there are cases where nonlinearities are really important. I don’t know the nitty gritty details of how the recurrent neural networks are fit. Is there some way to, or like with, um, multiple regression, for instance, we use regularization to also deal with some of this is a way of putting a bias into the model to simplify things. And, um, basically you can, you don’t fit noises as much. You put a bias in there. So, so you’re not doing as much over-fitting.  </p>



<p>Michael    00:54:27    Um, so I wonder if there’s some way to do that with, um, however, the recurrent neural networks, but, um, yeah, I definitely think, you know, the actual, I mean, there’s evidence that actual brand uses for current, um, connectivity a ton. Um, and there’s a lot of really good computational things that come out of that just from artificial neural networks, like, uh, the old element nets and so forth, like for language. And I could imagine for like the kind of paradigm. So we were talking about with rapid and start to test learning, I forgot to mention Todd braver, uh, held, uh, I was in his lab for my postdoc and, and actually Todd helped me, you know, we together developed rapid instruct to test learning the paradigms and the theory. Um, so I don’t even mention what Snyder, but, uh, yeah, Todd played a big role in that and, uh, also the network theories, but yeah, so that kind of task requires these sequential, um, processes. And it’s kind of like, yeah, you’re, you’re being programmed to do this little, uh, three roll program. Um, and, uh, that’s very different than, I guess what artificial neural networks are really good at like more like pattern recognition kind of thing. It’s, this is actually a sequence and it requires like temporal control and maintenance of information and updating information in time. And so that is really compatible with the things that, uh, recurrent neural networks can do.  </p>



<p>Paul    00:56:05    I was going to say, by the way, it’s, it’s fun to watch you think about a proposed collaboration in real time and immediately know that negative, like a good scientist.  </p>



<p>Michael    00:56:16    I kind of bookended it though. I said positive and then a bunch of negative. And I was like, no, this is totally the way to go.  </p>



<p>Paul    00:56:26    In fact, what happened is you started saying something negative and said, oh, I think it’s a really good idea, which is good on you. All right. Well, thanks Kanaka for the  </p>



<p>Michael    00:56:36    Question. Thanks for the question Kanaka  </p>



<p>Paul    00:56:38    Thinking about these, so-so the thing that you and Kanaka and, um, like Robert Yang are working on, are these sort of, inter-regional like multi-region kinds of models, right. Whereas, I mean, I think that you could think of the convolutional neural network as multi-regional, but if you train a convolutional neural network to do perform object recognition, you’re training it on one thing, essentially. And of course, um, you know, catastrophic forgetting is a problem in artificial networks. And so as continual learning, do you see the, the advent of these multi-regional kinds of networks, whether they’re inferred from empirical data like yours are or trained on the current flows, like, like Kanaka’s are, or the more traditional, uh, train and recurrent network on cognitive, uh, set of cognitive tasks, um, like Robert Yang is doing, do, do you think that the interplay between these regions will help us explain, especially in a multi-task sort of environment, um, will help us explain properties of empirical data that wouldn’t be explained by training on one task in one network?  </p>



<p>Michael    00:57:59    Yeah. Um, I think that’s plausible. I don’t know exactly why mechanistically though. I’m trying to think of, well, I think actually it’s the, um, they call it the inductive biases is one term that’s out there for the kind of things that evolution brings to the table and actual biological systems and, and maybe those biases are toward generalization. And so that, that might be the way we would discover what those are and then we can start using those and artificial neural networks too. So that’s kind of, I kind of alluded to that sort of idea, like if we did, you know, the gym de Carlo style network, but using empirical connectivity, maybe that would generalize better, um, provision, I don’t know, but, uh, certainly, yeah, I can imagine there’s all sorts of different processes for, um, generating flexible behavior that would have been, you know, supposedly selected for during evolution that would, uh, maybe shape how development happens or how, you know, just the brain is organized as a whole.  </p>



<p>Michael    00:59:09    And then on top of that, there’d be these learn. There are these learning algorithms that fine tune things, but, uh, maybe they, these biases in the network organization are key. That be my guess, I don’t know about whether it’s important to have a lot of regions or, you know, it’s really about the number of units or how well, one thing that I’ve kind of wondered about actually is, um, but what’s different about what we do is we look, we look at the empirical brain connectivity and it’s quite sparse, at least if you’re not using correlation, it’s quite, it’s quite sparse. You like the structural connectivity at the, at the like large scale. Um, whereas, you know, artificial neural networks will start out with these, like all the connections that are randomly weighted. And I do wonder if, um, sparsity is a big role here. That’s just the beginning now, right? Like sparsity and men, what, you know, what, what is it about the particular organization that’s helping shape activity flow and create these computations that generalize  </p>



<p>Paul    01:00:17    One of the reasons why I’m asking and I’m going to kind of keep pushing on this just a little bit, just to build up, I suppose, is something like a agenda Carlo convolutional, neural network, trained to perform object recognition. And that’s not really what vision is right. To solve static objects because we’re in this constant flow of doing quote-unquote vision while we’re doing seven other things. Um, and you know, uh, paying attention to our earned internal, uh, homeostatic signals, et cetera, et cetera. But, and it’s not enough just to like show movies because yes, that’s that’s movement, but it’s also still embedded in this sort of here is a task, um, framework where the world is much more. And I guess I could, um, allude to the push for ecologically valid tasks, but I still say task, but, but our interaction with the world is much more dynamic and flowing. And, um, you know, so, um, I’m wondering if you, if you think that, and, and here I’ll say emergent properties, right? So if you, if you think that using these kinds of inter-regional, um, approaches where you have more dynamic interactions among the different regions, however they’re connected, et cetera, uh, whether, you know, we, we might be able to explain inch closer to explaining, um, more of our subjective awareness or our internal cognitive flow, um, you know, of, of processing that we experienced. That was a mouthful. Sorry.  </p>



<p>Michael    01:01:52    That’s really interesting. So, yeah, so I think in order to really get the kind of dynamic interactions with the world, we’re really gonna need to be modeling multiple brain regions at the same time, but then not just that, but how they interact with each other. And so we’ve really emphasized going all the way. Ideally from stimulus to response, we focus really on that feed forward process for now, and it’s really about experimental attractability there. Um, but the key is right. There’s no one brain region that’s going to go all the way from stimulus to response. So we’re really going to need all these inter brain region interactions. And then, uh, yeah, once we get the before process figured out in some probably limited context, cause it’s a huge challenge. Um, then I can imagine worrying more about feedback, which is going to be, you know, let’s say the feed forward processes, uh, a lot of contexts it’s most of the problem, right?  </p>



<p>Michael    01:02:53    If you’re just like kind of passively, I don’t know, watching TV, uh, playing a video game or something, maybe that’s most of it, but um, other contexts it’s, it’s just a small part of it. In reality, most contexts feed forward and feedback are just constantly dynamically updating, but action perception cycle. Um, but yeah, I mean, at a minimum, yeah. You’d want multiple brain regions involved in your model. And so what we found is that if we took the activity from those things, I just described now the sensory input, the task context or rule representations, and also the, the motor responses. Then we were able to actually simulate that and generate, uh, a task performing model from empirical brand data. The trickiest part was, was in the middle. Like how do you integrate the task rule representations? So they’re going there’s activity flowing through the rest of the state connections somewhere.  </p>



<p>Michael    01:03:56    And there’s sensory information flowing through the rest of state connections somewhere. And we want to know where is that? And that’s equivalent to the hidden layer and an artificial neural network. It’s just like, it’s just thrown out there like, oh, clearly there’s this hidden layer. And in the literature it’s talked about is association cortex, which is most of cortex in humans, right? So it’s like, where is that exactly? Right. So this is part of the, this is kind of a major issue. Um, and actually an opportunity for advancing understanding by saying like, no, that’s actually figure out what, where th this theoretical construct that hidden layer it is, these are the connection or, sorry, these are the conjunction injunction. Hubs is what we call the hidden layer actually plays a lot of different roles in a lot of different networks. So in this particular situation, uh, it’s at the conjunction between, uh, the context, uh, you know, task role representations and the stimulus input.  </p>



<p>Michael    01:04:57    Um, and then, so what we ended up doing, um, so there were a lot of different strategies. We thought of what we ended up doing is actually building an artificial neural network that could perform the task. Um, and then looking at what’s called the representational geometry of the hidden layer and then using a representational similarity analysis to look at where, which brain regions have a similar representational geometry. So they, you know, the similarity of the activity patterns matches what’s going on in the hidden layer. So just to go over this pre-print, uh, Ito et al. 2021 pre-print on the ENN, there are basically, uh, three, let’s say four steps to it. So what we, the big ideas that we wanted to take the actual empirical brain data for the activity patterns and use empirical functional connectivity to link together these different brain regions all the way from stimulus to response.  </p>



<p>Michael    01:05:57    And so we start with the sensory input. We decode sensory areas to ensure that we actually have the information that’s relevant to the task and these regions, we then, uh, also decode the task context. So this is all using that pro paradigm that I talked about earlier, by the way. So you have all these 64 different tasks rules that are recombined, and we do code each of those tasks, uh, and find brain regions that actually have that information in them. We then, uh, use functional connectivity to, uh, stimulate the activity flow that would go into what we might call the hidden layer or, uh, what we recall specifically conjunction hubs. Cause it’s the conjunction between the sensory and put in tasks context, we then apply a non-linearity there, uh, which turns out to be pretty important. And then after that, we do another activity close to up to M one.  </p>



<p>Michael    01:06:54    So the output regions, and then that’s our prediction of behavior, right? So we’ve gone all the way from sensory input to motor output in a context dependent decision-making task. And then we decode, uh, what motor response is happening. And it’s not just a normal decoding, by the way, it’s, it’s trained on actual, empirical. This is how people press buttons. And this is what happens in primary motor cortex when they do so we’re actually, um, decoding in the format that M one uses to represent these button presses and then we get above chance accuracy. So that’s, that’s actually, uh, uh, a task-performing brain model from empirical data  </p>



<p>Paul    01:07:36    Now training with zero training using the  </p>



<p>Michael    01:07:41    That’s awesome. I’m trying to think. Oh yeah. The other thing that theory predicted that made us think we were really going to have to do this, but we weren’t totally sure was, um, whether we needed a nonlinearity at the hidden layer. So the, uh, there’s a model by, uh, uh, John Cohen, uh, Dunbar and, uh, Jim McClelland in 1990 is the Stroop model where they introduced this, uh, context layer to complement the hidden layer. So that we think of that as like, you know, where the rules are represented, the context layer. Um, so they made a big deal in that paper about the nonlinearity in the hidden layer is really important. It’s kind of like an attention kind of mechanism where it’s, you’re selecting the representations that are going to, um, basically filter the stimuli according to the task context so that you select the correct motor responses. Um, and so lo and behold, we did need a, non-linearity just like we thought we would. Um, I mean, for theoretical reasons, you think so, right? Because it’s context dependent, decision-making you need this interaction so that it’s like contingent, right? So if the stimulus, the stimulus can go to totally different motor responses, the same exact stimulus, it depends on the rule. And so that there’s a non-linear interaction that has to happen, so you can select the correct one. So, so I thought that was pretty cool that that came out of the, the work.  </p>



<p>Paul    01:09:09    One of the things that I like that, um, that you are in pursuit of is so you have connections, right? And that’s all networks, and you can talk about the properties of those connections. If this is like network neuroscience, right? Where you talk about path length and, um, the, you know, different metrics of how to characterize a static essentially network, uh, and then you have functional, um, connectivity between them and what your work is doing is, is bringing those two things together. Do you think that, but it’s, it’s still essentially all networks, right. Do you think that, um, this sort of network, vernacular and approach also looking at the dynamics and like you just were talking about the non-linearities, uh, but, and looking at activity flows within networks, what you think that’s going to be enough to quote unquote, explain cognition, or do we, will we need to talk about multi-scale multi multi-level scale, uh, organizational, uh, components.  </p>



<p>Michael    01:10:13    Yeah. So one reason I went down this path of, um, making these, uh, empirically estimated neural networks or these network coding models, um, or whatever term we want to use, was to make that an empirical question basically. Um, it was a bit like, you know, do I think I really had a couple of moments like this kind of like, do I think that this is the real kind of activity, like they use in models? And I said, I don’t know, maybe not, but I should try it and see, and then I’ve been surprised, you know, that it, you know, these things, uh, I’m sure there’s going to be limits to it, but, um, it does seem to be some sort of equivalence there. And so I, uh, like I said earlier about the, um, does saying, like, we probably won’t be able to like model someone playing a complex piano piece using fMRI, there’s going to be similar limits at whatever scale where we are and I’m I’m, but I’m hopeful.  </p>



<p>Michael    01:11:19    Right. I think it’s plausible to say, like, we could make these tasks that are a little bit artificial, but still informative enough. Uh, you know, it’s a forced choice between two button presses because maybe we can decode the right versus left hand really easily or something like that. Um, and, but you can still get the key network computations, the network mechanisms, um, as long as you, you know, maybe construct the task appropriately, like if we were able to do that, I would be very happy and then it would be like, oh no, we can’t, you know, do this really subtle thing. And then yeah. Then you’ll have to get into, you know, very fine grain things. Uh, there’s also is the question of like, like when I say, okay, um, there’s this connectivity pattern between these two regions and I have all these voxels inside there. So it’s like pretty fine grain on, on, in some sense, but you always could say like these, between these two voxels, what exactly is the physical basis of that? And you go all the way down to individual synapses and explaining that, right. So there’s always levels here. It’s just whether we are at a level where we can say, we’re pretty satisfied with our explanation of this cognitive process. Um, and I’m hopeful that, you know, we’ll get pretty far at this level, but you never know till you try.  </p>



<p>Paul    01:12:42    Oh, see, there’s more, more, uh, optimism also. Uh,  </p>



<p>Michael    01:12:46    So it’s overall optimistic. Wasn’t  </p>



<p>Paul    01:12:50    So Mike, this is ostensibly a show about neuroscience and AI and, you know, often what gets left, uh, off the table in these conversations. And I’m going to make sure and include it in ours is the potential for like your work, for instance, and this kind of approach for actually influencing and benefiting AI. Because right now we’re in this place where, you know, we’re using all these deep learning, we, uh, you guys are using all these deep learning models, even though you hate learning and don’t use deep learning of course. But, um, but the deep learning model approach is, um, is, uh, the flow I’ll say is much more toward neuroscience and benefiting how we’re understanding brains. But of course, um, the whole deep learning approach was began. The whole deep learning approach began with the concepts of concept of neural networks. Right. So, so the activity flow does go both ways. Do you feel like, um, these models that you’re building for instance, uh, will have implications for, or benefits for AI?  </p>



<p>Michael    01:14:01    Uh, yeah. It’s actually on multiple fronts, I guess, to zoom out a little bit. Um, so the one reason I was interested in the rapid instructed task learning stuff was because, um, I am actually interested in learning, but I’m interested in how, you know, humans learn some things much more rapidly than artificial neural networks. And so, you know, it’s possible that some of the insights we get from, uh, the RITL work will translate into, you know, being able to just instruct a machine verbally to do some tasks like you would another person. Um, and also just the general ability to flexibly, reorient to, and reuse concepts and, uh, I guess task rules or task information. And then in terms of the activity flow models, like the ENN and, um, that’s, that’s a little bit more where I’m just, yeah, I think I already described it a little bit, just like will something emerge from these things that, uh, is in biological tissue that we’re able to simulate and then just be kind of surprised by it’s the ability to generalize. It’s a little more of a bottom up kind of thing than the RITL work where we’re, we’re, you know, we have this kind of cognitive theoretical target. And I get, I guess, because, you know, I am trying to merge the two whenever I can. That that would be the ultimate, right. If, if it was like re re simulate RITL, and then, uh, it works and then it’s like, we, we dig into how the model is working and we say, oh, this AI model just did this one thing generalized to allow generalization  </p>



<p>Paul    01:15:46    On the same lines as like the system one system to the, um, you know, kind of in system one system, two difference and, or the, uh, AI needs a prefrontal cortex push from Bengio and O’Reilly and those sorts of folks, do you see?  </p>



<p>Michael    01:16:03    Yeah, it’s totally related to that. Yeah. So like, you know, I worked with Walt Schneider who, you know, had the controlled versus automatic processing, which maps like even, you know, I believe Kahneman said it maps one to one onto the system one, system two concept. Um, so yeah, I mean, controlled processing, but this particular, uh, flavor of control processing that is really about novel task behavior and transferring, um, abilities to novel situations and that which is directly related to, uh, general human intelligence, which is another topic that I really dug into when I was working with Todd Braver. So general fluid intelligence is this really fascinating concept in psychology. That’s really about individual differences and is directly related to, uh, RITL abilities. Um, they, they actually correlate quite strongly. And so if we could really, you know, figure out what’s going on, like why do, why do humans have this?  </p>



<p>Michael    01:17:05    Um, isn’t, it is, it is a factor analytic thing that they can see in the statistics that, that each individual, it seems to have this general ability that generalizes across a bunch of different tasks. Um, what is that, you know, where, where is that in the brain? And like, what’s the mechanism behind that? You know, maybe once we figured that out, we can copy that over for AI. And then I guess there’s the term artificial general intelligence, and I’m talking about natural general intelligence, right. And maybe there’s some way to learn from one and ticket over to the other.  </p>



<p>Paul    01:17:37    Well, I, I, those, um, control processes, are we going to be talking more like in symbols and rather than lower level network properties, are we going to end up, you know, having, having this mesh between symbolic, uh, and neural network type of, uh, architectures,  </p>



<p>Michael    01:17:59    From what I understand, that was a really hot topic, like in the late 1980s, early 1990s. And it was seemingly going that way. And then it that’s when I rolled them out, I read about it more, uh, you know, the old literature. So I don’t, I haven’t been following the recent stuff, but I, I guess my, you know, having thought about it for a long time now from that, that older literature, my thought was, you know, let’s just figure out how the actual biological tissue does the symbol, like stuff. And then, uh, then we can still just stay in this distributed architecture. And you had the benefit of right, like mapping and potentially a one to one on one to the human brain, like we’re trying to do with Enns. Um, right. If we start putting in these abstract symbolic modules, then it’d be like, well, wait, where exactly does it map onto? And then it’s like, can we go any deeper into that? And not really, I guess you might be able to find, you know, in maps onto a brain region, but not the inner workings. I bet wouldn’t, you know, map very well. So  </p>



<p>Paul    01:19:04    Very good. Well, so in our final few minutes here, and thanks for hanging with me for so long, what do you, I know you’re, you’re working on multiple fronts. We, we talked mostly about just one of the things that you’re working on, but I just want to ask you what, uh, like last night after you brushed your teeth, you know, and, uh, flossed and put your anti-aging cream on, you know, and, uh, laid down, uh, w what did you think about, what, what, what kept you up longer than you should have been up?  </p>



<p>Michael    01:19:32    Um, you know, I thought, uh, I, I mentioned earlier something about causal inference, and I guess that keeps coming, coming up for me as, you know, central to not just what I’m working on, but really neuroscience and science in general. You know, it’s a really hard problem, especially in complex systems, like, um, you know, the brain and even these AI systems. So one big idea that, uh, we’ve been pursuing in my lab is just this idea of using causality as a kind of common ontology for different areas of neuroscience. And it’s really based on a general hypothesis that causal interactions among neural populations. Uh, we are really thinking that those will end up being the most critical features for explaining the neural basis of cognition. Uh, of course there’s a lot of other things, but if you have things, if you have neural processes described in terms of causal properties and these kinds of activity flow processes that I’ve been talking about, and that’s going to maybe be the main way of describing like an explanation for how some kind of cognitive process emerges, uh, is generated.  </p>



<p>Michael    01:20:52    So, um, there’s tons of other details, of course, but you could think of them more as modifying that process, right? There’s sets a processes. So you have a non-linearity at one step that’s about selecting a subset of the activity flows that then, uh, change how things happen downstream. Um, you also have lots of concepts like, uh, confounders, um, causer colliders that will take a while to get into, but all of those things, I think together again, to be really important for getting explanations for brain function and, uh, how cognition emerges from, uh, neural populations, like the kind of explanations that would be actually satisfied by potentially, oh, yeah. One thing I will say that I’ve been, I’ve been thinking about recently, um, along these lines is the concept of, uh, what I call causal sufficiency. So I don’t know, maybe, you know, this is already out there, I just haven’t come across it.  </p>



<p>Michael    01:21:53    But the idea is, you know, even if you ablate or lesion a region, you can show that it’s causally necessary, but you don’t know if that brain region say was causally sufficient to make the cognitive process. And that’s where these models can come in, right? Like the ENN or even an ANN, uh, or, or any sort of model, right. You actually generate the process and you could show, especially if it’s empirically constrained, you can say, this is equivalent in all these ways to the actual biology and then where it generates the kind of the process of interest. So, you know, at the very least it’s causally sufficient. And, um, then, you know, you also would like to have some of these lesions and stimulation to show causal necessity potentially, but you could even imagine, like say there’s like two different pathways that can accomplish the same cognitive process. So you, you ablate one and, it does nothing, potentially you ablate the other. It does nothing, but really they’re both causally, maybe sufficient for generating the, kind of the process  </p>



<p>Paul    01:23:00    That speaks to work like from, you know, like Eve martyr and the idea of multiple realizability and how, you know, in the end, anyway, we’re activating our muscles right. To perform some tasks. So it might, you might get away with being pretty ugly internally and still come out with the right behavior. And, you know, this is what everyone’s interested in, I suppose, or what we’re testing. The vast majority of it is behavior anyway.  </p>



<p>Michael    01:23:28    Right? Yeah. I did have some interesting reviews for something that I was working on with the ENN where I was emphasizing behavior. And that’s like, kind of the, it was, I felt like it was like a holy, the holy grail. Like if I can predict behavior, well, you know, that’s how, you know, things are index, but then I had like, sort of hearsay, well, like all you’re doing is predicting motor behavior. You know, what about cognitive processes? I’m like, oh, what, like, that’s what we have been doing. And it’s the innovation is that we’re getting all the way to behavior now. So then I just I’ll have to say is like, no, we’ve been doing that. That’s, that’s, that’s the, I have to make the cognitive process to predict and one behavior  </p>



<p>Paul    01:24:14    And they accepted,  </p>



<p>Michael    01:24:16    Oh, I’m still in the process of, so we’ll see, we’ll see right here, this explanation and be like, oh yeah.  </p>



<p>Paul    01:24:26    W well, I’ll push the, oh, I can’t, I got to air it in a few days, man. I can’t push this out until it’s accepted. So, sorry, we’ll bleep that, I suppose. Um, so finally, Mike, uh, I want to ask you career, uh, career type question here. So, uh, I knew you back in graduate school. I know you did a lot of stuff before that. You’ve had a lot of good advisors, you know, throughout, and I know you’ve worked extremely hard, which I’ve always been impressed with. Uh, it seems like you’re always on focus and on point, I’m wondering if there’s a time throughout your career or, you know, a specific time, or I’m sure there are multiple times, but if you could tell a story about some time that when you feel like luck played a integral part of some success in your career,  </p>



<p>Michael    01:25:17    Um, yeah. So I guess, um, the early interest in what later became known as network neuroscience, uh, yeah, like, and I guess that really started in Mark D’Esposito’s lab. I was just lucky that I ended up in his lab and then continued along that line. Um, and the reason it’s lucky is because it’s beyond me that, you know, the rest of the field really went in that direction so that, you know, I could, I didn’t have to swim upstream, I guess, to like, make progress on that. I, there was a real current going on. And, um, and then also that I was at Washington University. So that was when I was working with Todd Braver. And also Steve Petersen, when the Human Connectome Project was started there, I wasn’t actually involved in it, but I was right there. And I had all these advantages for like, knowing about it and what it involved and being able to like ask questions about the data early on. And, and that was just like this treasure trove of, of, you know, questions that we could ask without having to even collect new data, ask a bunch of questions and, you know, and the analysis took a long time and were a lot of work, but it wasn’t nearly as hard as, you know, designing experiments and collecting data, and the large N actually made for much more robust conclusions and statistics. So anyway, all that is to say that was, that was luck.  </p>



<p>Paul    01:26:54    Is it possible to parlay that kind of serendipity into advice for aspiring people, maybe people who aren’t, who feel like they haven’t been so lucky or they are swimming upstream, is it even possible or is it just, is the only thing to say, um, that those are just, uh, lucky events.  </p>



<p>Michael    01:27:18    I’ll say that, you know, there are a lot of people at wash U when I was there that didn’t work with the Human Connectome Project data. So, I mean, I guess it’s like, what’s that saying, like luck favors the prepared mind? Uh, or seize the day — that works too. Yeah. You know, I dunno, just really look for opportunities wherever you are and, and kind of, you know, it required me to change what I was going to do. Right. Like, you know, even if we didn’t even have a plan, you know, I, I made that my plan instead of something else. So it wasn’t pure, you know, like I’m just totally passive. Uh, there was some kind of like seizing the opportunity. And then there’s also, I guess, in this particular case, um, some intuition. So I dunno, I dunno if that’s like, you can totally plan on that, but  </p>



<p>Paul    01:28:17    I’ll also be smart.  </p>



<p>Michael    01:28:21    Just like think plausibly, you know, if this trims or this little, this little idea, actually, because it was before it was a trend, I guess, or early, early days of the trend, um, if this kept going, is it even plausibly going to lead to anything? It was like, okay, the brain is a network, you know, we already have known that forever. So like, yeah, studying the brain is a network. Seems like a good idea. So you know, that kind of general logic I think could help, but yeah. I mean, it, you can’t really make general advice on this. I don’t think it’s just like, in this case important factors.  </p>



<p>Paul    01:29:02    Yeah. The only advice you can give is like, you have to work super hard and, and, and develop skills in whatever you’re doing and I guess be willing to change. Right. And seize the day when something like that comes along and it feels right and seems right. I don’t.  </p>



<p>Michael    01:29:18    Yeah. Yeah. Um, yeah. What is that called? Like exploration, exploitation trade-off or,  </p>



<p>Paul    01:29:25    Yeah, but then the boy that’s a whole, that’s a whole other bag though, open, but yet you have to explore and then explode, explode, explode, exploit, and then explore. And I don’t know the, but I don’t know the perfect pattern for that either. That’s something a returning theme actually that I don’t know that there’s the right. I can’t that I can write out that algorithm, but right. All right. All right. I won’t keep you any longer. Thank you, Mike, for coming on. Thanks for answering those guests questions as well. And, uh, I love the work and continued success to you.  </p>



<p>Michael    01:29:56    Oh, thank you. Thanks for having me on it. It’s been great talking and the guest questions were a real highlight. It was great to hear from some old friends.  </p>



<p>Paul    01:30:10    Brain inspired is a production of me and you. I don’t do advertisements. You can support the show through Patreon for a trifling amount and get access to the full versions of all the episodes. Plus bonus episodes that focus more on the cultural side, but still have science go to braininspired.co and find the red Patreon button there to get in touch with me, email paul@braininspired.co. The music you hear is by the new year. Find them at thenewyear.net. Thank you for your support. See you next time.  </p>

</div></div>


<p>0:00 – Intro<br />4:58 – Cognitive control<br />7:44 – Rapid Instructed Task Learning and Flexible Hub Theory<br />15:53 – Patryk Laurent question: free will<br />26:21 – Kendrick Kay question: fMRI limitations<br />31:55 – Empirically-estimated neural networks (ENNs)<br />40:51 – ENNs vs. deep learning<br />45:30 – Clinical relevance of ENNs<br />47:32 – Kanaka Rajan question: a proposed collaboration<br />56:38 – Advantage of modeling multiple regions<br />1:05:30 – How ENNs work<br />1:12:48 – How ENNs might benefit artificial intelligence<br />1:19:04 – The need for causality<br />1:24:38 – Importance of luck and serendipity</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/b5b044a9-1fca-40ad-8846-8d85476ef081-116-Mike-Cole-public.mp3" length="87991556"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community.











Mike and I discuss his modeling approach to study cognition. Many people I have on the podcast use deep neural networks to study brains, where the idea is to train or optimize the model to perform a task, then compare the model properties with brain properties. Mike’s approach is different in at least two ways. One, he builds the architecture of his models using connectivity data from fMRI recordings. Two, he doesn’t train his models; instead, he uses functional connectivity data from the fMRI recordings to assign weights between nodes of the network (in deep learning, the weights are learned through lots of training). Mike calls his networks empirically-estimated neural networks (ENNs), and/or network coding models. We walk through his approach, what we can learn from models like ENNs, discuss some of his earlier work on cognitive control and our ability to flexibly adapt to new task rules through instruction, and he fields questions from Kanaka Rajan, Kendrick Kay, and Patryk Laurent.



The Cole Neurocognition lab.Twitter: @TheColeLab.Related papersDiscovering the Computational Relevance of Brain Network Organization.Constructing neural network models from brain data reveals representational transformation underlying adaptive behavior.Kendrick Kay’s previous episode: BI 026 Kendrick Kay: A Model By Any Other Name.Kanaka Rajan’s previous episode: BI 054 Kanaka Rajan: How Do We Switch Behaviors?


Transcript

Michael Cole. Uh, you and I go back a few years, welcome to the  



Michael    00:04:01    Podcast. Thanks for having me on.  



Paul    00:04:04    So, uh, we, well, I say we go back a few years. Uh, it’s more like, uh, I’ve just been admiring you from afar. I guess you were one year ahead of me in graduate school at the CNBC at Pitt and CMU. And you’ve gone on to be many years ahead of me. It turns out,  



Michael    00:04:21    I don’t know about that. You’re, you’re pretty, I don’t know, like intellectually, I feel like there’s, I’ve been following you from afar, I guess I should say in the form of the podcast, as soon as I heard you had this podcast, I started listening. I haven’t heard all your episodes yet, so many, but I I’ve heard, but, uh, yeah, I dunno, like I can, I can see that you’ve really, uh, expanded your horizons and I’m a little jealous that you have, like the time and, and, uh, I guess, space to be having these really awesome conversations with such a variety of people.  



Paul    00:04:58    Well, uh, well today’s topic is about, uh, the jealousy that I have for you and what you’re doing, so  



Mi...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:31:20</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 115 Steve Grossberg: Conscious Mind, Resonant Brain]]>
                </title>
                <pubDate>Sat, 02 Oct 2021 14:36:51 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-115-steve-grossberg-conscious-mind-resonant-brain</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-115-steve-grossberg-conscious-mind-resonant-brain</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/09/art-115-01.jpg" alt="" class="wp-image-1444" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong></p>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></div>



<div class="wp-block-image"><img width="229" height="300" src="https://braininspired.co/wp-content/uploads/2020/08/download.jpeg" alt="" class="wp-image-1099" /></div>



<p>Steve and I discuss his book <a href="https://www.amazon.com/gp/product/B094W6BBKN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B094W6BBKN&amp;linkId=edb32e098e6dba02c0471d73d8b39e64">Conscious Mind, Resonant Brain: How Each Brain Makes a Mind</a>.  The book is a huge collection of his models and their predictions and explanations for a wide array of cognitive brain functions. Many of the models spring from his Adaptive Resonance Theory (ART) framework, which explains how networks of neurons deal with changing environments while maintaining self-organization and retaining learned knowledge. ART led Steve to the hypothesis that all conscious states are resonant states, which we discuss. There are also guest questions from <a href="https://braininspired.co/podcast/84/">György Buzsáki</a>, <a href="https://braininspired.co/podcast/30/">Jay McClelland</a>, and <a href="https://braininspired.co/podcast/113/">John Krakauer</a>.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/B094W6BBKN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B094W6BBKN&amp;linkId=edb32e098e6dba02c0471d73d8b39e64"><img src="https://braininspired.co/wp-content/uploads/2021/09/bookThumb.jpeg" alt="" class="wp-image-1445" width="193" height="250" /></a></div>



<ul><li>Steve’s <a href="https://sites.bu.edu/steveg/">BU website</a>.</li><li><a href="https://www.amazon.com/gp/product/B094W6BBKN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B094W6BBKN&amp;linkId=edb32e098e6dba02c0471d73d8b39e64">Conscious Mind, Resonant Brain: How Each Brain Makes a Mind</a></li><li>Previous Brain Inspired episode:<ul><li><a href="https://braininspired.co/podcast/82/">BI 082 Steve Grossberg: Adaptive Resonance Theory</a></li></ul></li></ul>



<img width="775" height="524" src="https://braininspired.co/wp-content/uploads/2021/10/Rossberg_2_website.png" alt="" class="wp-image-1448" />“Sweet Data Dreams” by <a href="https://twitter.com/d4phn3c">Daphne Cornelisse</a>.



<p>0:00 – Intro<br />2:38 – Conscious Mind, Resonant Brain<br />11:49 – Theoretical method<br />15:54 – ART, learning, and consciousness<br />22:58 – Conscious vs. unconscious resonance<br />26:56 – Györy Buzsáki question<br />30:04 – Remaining mysteries in visual system<br />35:16 – John Krakauer question<br />39:12 – Jay McClelland question<br />51:34 – Any missing principles to explain human cognition?<br />1:00:16 – Importance of an early good career start<br />1:06:50 – Has modeling training caught up to experiment training?<br />1:17:12 – Universal development code</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community











Steve and I discuss his book Conscious Mind, Resonant Brain: How Each Brain Makes a Mind.  The book is a huge collection of his models and their predictions and explanations for a wide array of cognitive brain functions. Many of the models spring from his Adaptive Resonance Theory (ART) framework, which explains how networks of neurons deal with changing environments while maintaining self-organization and retaining learned knowledge. ART led Steve to the hypothesis that all conscious states are resonant states, which we discuss. There are also guest questions from György Buzsáki, Jay McClelland, and John Krakauer.







Steve’s BU website.Conscious Mind, Resonant Brain: How Each Brain Makes a MindPrevious Brain Inspired episode:BI 082 Steve Grossberg: Adaptive Resonance Theory



“Sweet Data Dreams” by Daphne Cornelisse.



0:00 – Intro2:38 – Conscious Mind, Resonant Brain11:49 – Theoretical method15:54 – ART, learning, and consciousness22:58 – Conscious vs. unconscious resonance26:56 – Györy Buzsáki question30:04 – Remaining mysteries in visual system35:16 – John Krakauer question39:12 – Jay McClelland question51:34 – Any missing principles to explain human cognition?1:00:16 – Importance of an early good career start1:06:50 – Has modeling training caught up to experiment training?1:17:12 – Universal development code
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 115 Steve Grossberg: Conscious Mind, Resonant Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/09/art-115-01.jpg" alt="" class="wp-image-1444" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong></p>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></div>



<div class="wp-block-image"><img width="229" height="300" src="https://braininspired.co/wp-content/uploads/2020/08/download.jpeg" alt="" class="wp-image-1099" /></div>



<p>Steve and I discuss his book <a href="https://www.amazon.com/gp/product/B094W6BBKN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B094W6BBKN&amp;linkId=edb32e098e6dba02c0471d73d8b39e64">Conscious Mind, Resonant Brain: How Each Brain Makes a Mind</a>.  The book is a huge collection of his models and their predictions and explanations for a wide array of cognitive brain functions. Many of the models spring from his Adaptive Resonance Theory (ART) framework, which explains how networks of neurons deal with changing environments while maintaining self-organization and retaining learned knowledge. ART led Steve to the hypothesis that all conscious states are resonant states, which we discuss. There are also guest questions from <a href="https://braininspired.co/podcast/84/">György Buzsáki</a>, <a href="https://braininspired.co/podcast/30/">Jay McClelland</a>, and <a href="https://braininspired.co/podcast/113/">John Krakauer</a>.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/B094W6BBKN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B094W6BBKN&amp;linkId=edb32e098e6dba02c0471d73d8b39e64"><img src="https://braininspired.co/wp-content/uploads/2021/09/bookThumb.jpeg" alt="" class="wp-image-1445" width="193" height="250" /></a></div>



<ul><li>Steve’s <a href="https://sites.bu.edu/steveg/">BU website</a>.</li><li><a href="https://www.amazon.com/gp/product/B094W6BBKN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B094W6BBKN&amp;linkId=edb32e098e6dba02c0471d73d8b39e64">Conscious Mind, Resonant Brain: How Each Brain Makes a Mind</a></li><li>Previous Brain Inspired episode:<ul><li><a href="https://braininspired.co/podcast/82/">BI 082 Steve Grossberg: Adaptive Resonance Theory</a></li></ul></li></ul>



<img width="775" height="524" src="https://braininspired.co/wp-content/uploads/2021/10/Rossberg_2_website.png" alt="" class="wp-image-1448" />“Sweet Data Dreams” by <a href="https://twitter.com/d4phn3c">Daphne Cornelisse</a>.



<p>0:00 – Intro<br />2:38 – Conscious Mind, Resonant Brain<br />11:49 – Theoretical method<br />15:54 – ART, learning, and consciousness<br />22:58 – Conscious vs. unconscious resonance<br />26:56 – György Buzsáki question<br />30:04 – Remaining mysteries in visual system<br />35:16 – John Krakauer question<br />39:12 – Jay McClelland question<br />51:34 – Any missing principles to explain human cognition?<br />1:00:16 – Importance of an early good career start<br />1:06:50 – Has modeling training caught up to experiment training?<br />1:17:12 – Universal development code</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/b05d59a4-7120-4f7a-b2a4-dc0d8c154ff0-115-Steve-Grossberg-public.mp3" length="80639936"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community











Steve and I discuss his book Conscious Mind, Resonant Brain: How Each Brain Makes a Mind.  The book is a huge collection of his models and their predictions and explanations for a wide array of cognitive brain functions. Many of the models spring from his Adaptive Resonance Theory (ART) framework, which explains how networks of neurons deal with changing environments while maintaining self-organization and retaining learned knowledge. ART led Steve to the hypothesis that all conscious states are resonant states, which we discuss. There are also guest questions from György Buzsáki, Jay McClelland, and John Krakauer.







Steve’s BU website.Conscious Mind, Resonant Brain: How Each Brain Makes a MindPrevious Brain Inspired episode:BI 082 Steve Grossberg: Adaptive Resonance Theory



“Sweet Data Dreams” by Daphne Cornelisse.



0:00 – Intro2:38 – Conscious Mind, Resonant Brain11:49 – Theoretical method15:54 – ART, learning, and consciousness22:58 – Conscious vs. unconscious resonance26:56 – György Buzsáki question30:04 – Remaining mysteries in visual system35:16 – John Krakauer question39:12 – Jay McClelland question51:34 – Any missing principles to explain human cognition?1:00:16 – Importance of an early good career start1:06:50 – Has modeling training caught up to experiment training?1:17:12 – Universal development code
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:23:41</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 114 Mark Sprevak and Mazviita Chirimuuta: Computation and the Mind]]>
                </title>
                <pubDate>Wed, 22 Sep 2021 10:15:39 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-114-mark-sprevak-and-mazviita-chirimuuta-computation-and-the-mind</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-114-mark-sprevak-and-mazviita-chirimuuta-computation-and-the-mind</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/09/art-114-01.png" alt="" class="wp-image-1438" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong></p>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="606" height="457" src="https://braininspired.co/wp-content/uploads/2021/09/marksprevak.jpg" alt="" class="wp-image-1439" /></li><li class="blocks-gallery-item"><img width="680" height="600" src="https://braininspired.co/wp-content/uploads/2021/09/MC-pic.jpg" alt="" class="wp-image-1440" /></li></ul>



<p>Mark and Mazviita discuss the philosophy and science of mind, and how to think about computations with respect to understanding minds. Current approaches to explaining brain function are dominated by computational models and the computer metaphor for brain and mind. But there are alternative ways to think about the relation between computations and brain function, which we explore in the discussion. We also talk about the role of philosophy broadly and with respect to mind sciences, pluralism and perspectival approaches to truth and understanding, the prospects and desirability of naturalizing representations (accounting for how brain representations relate to the natural world), and much more.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0367733668/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0367733668&amp;linkId=780824e84b72c8a7c58ce501b97bbce8"><img src="https://braininspired.co/wp-content/uploads/2021/09/routledge-thumb.jpg" alt="" class="wp-image-1441" width="266" height="374" /></a></div>



<ul><li><a href="https://marksprevak.com/">Mark’s website</a>.</li><li><a href="https://www.ed.ac.uk/profile/mazviita-chirimuuta">Mazviita’s University of Edinburgh page</a>.</li><li>Twitter (Mark): <a href="https://twitter.com/msprevak">@msprevak</a>.</li><li>Mazviita’s previous Brain Inspired episode:<ul><li><a href="https://braininspired.co/podcast/72/">BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality</a></li></ul></li><li>The related book we discuss:<ul><li><a href="https://www.amazon.com/gp/product/0367733668/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0367733668&amp;linkId=780824e84b72c8a7c58ce501b97bbce8">The Routledge Handbook of the Computational Mind 2018 Mark Sprevak Matteo Colombo (Editors)</a></li></ul></li></ul>



<p>0:00 – Intro<br />5:26 – Philosophy contributing to mind science<br />15:45 – Trend toward hyperspecialization<br />21:38 – Practice-focused philosophy of science<br />30:42 – Computationalism<br />33:05 – Philosophy of mind: identity theory, functionalism<br />38:18 – Computations as descriptions<br />41:27 – Pluralism and perspectivalism<br />54:18 – How much of brain function is computation?<br />1:02:11 – AI as computationalism<br />1:13:28 – Naturalizing representations<br />1:30:08 – Are you doing it right?</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Support the show to get full episodes and join the Discord community











Mark and Mazviita discuss the philosophy and science of mind, and how to think about computations with respect to understanding minds. Current approaches to explaining brain function are dominated by computational models and the computer metaphor for brain and mind. But there are alternative ways to think about the relation between computations and brain function, which we explore in the discussion. We also talk about the role of philosophy broadly and with respect to mind sciences, pluralism and perspectival approaches to truth and understanding, the prospects and desirability of naturalizing representations (accounting for how brain representations relate to the natural world), and much more.







Mark’s website.Mazviita’s University of Edinburgh page.Twitter (Mark): @msprevak.Mazviita’s previous Brain Inspired episode:BI 072 Mazviita Chirimuuta: Understanding, Prediction, and RealityThe related book we discuss:The Routledge Handbook of the Computational Mind 2018 Mark Sprevak Matteo Colombo (Editors)



0:00 – Intro5:26 – Philosophy contributing to mind science15:45 – Trend toward hyperspecialization21:38 – Practice-focused philosophy of science30:42 – Computationalism33:05 – Philosophy of mind: identity theory, functionalism38:18 – Computations as descriptions41:27 – Pluralism and perspectivalism54:18 – How much of brain function is computation?1:02:11 – AI as computationalism1:13:28 – Naturalizing representations1:30:08 – Are you doing it right?
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 114 Mark Sprevak and Mazviita Chirimuuta: Computation and the Mind]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/09/art-114-01.png" alt="" class="wp-image-1438" />



<p class="has-text-align-center"><strong>Support the show to get full episodes and join the Discord community</strong></p>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="606" height="457" src="https://braininspired.co/wp-content/uploads/2021/09/marksprevak.jpg" alt="" class="wp-image-1439" /></li><li class="blocks-gallery-item"><img width="680" height="600" src="https://braininspired.co/wp-content/uploads/2021/09/MC-pic.jpg" alt="" class="wp-image-1440" /></li></ul>



<p>Mark and Mazviita discuss the philosophy and science of mind, and how to think about computations with respect to understanding minds. Current approaches to explaining brain function are dominated by computational models and the computer metaphor for brain and mind. But there are alternative ways to think about the relation between computations and brain function, which we explore in the discussion. We also talk about the role of philosophy broadly and with respect to mind sciences, pluralism and perspectival approaches to truth and understanding, the prospects and desirability of naturalizing representations (accounting for how brain representations relate to the natural world), and much more.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0367733668/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0367733668&amp;linkId=780824e84b72c8a7c58ce501b97bbce8"><img src="https://braininspired.co/wp-content/uploads/2021/09/routledge-thumb.jpg" alt="" class="wp-image-1441" width="266" height="374" /></a></div>



<ul><li><a href="https://marksprevak.com/">Mark’s website</a>.</li><li><a href="https://www.ed.ac.uk/profile/mazviita-chirimuuta">Mazviita’s University of Edinburgh page</a>.</li><li>Twitter (Mark): <a href="https://twitter.com/msprevak">@msprevak</a>.</li><li>Mazviita’s previous Brain Inspired episode:<ul><li><a href="https://braininspired.co/podcast/72/">BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality</a></li></ul></li><li>The related book we discuss:<ul><li><a href="https://www.amazon.com/gp/product/0367733668/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0367733668&amp;linkId=780824e84b72c8a7c58ce501b97bbce8">The Routledge Handbook of the Computational Mind 2018 Mark Sprevak Matteo Colombo (Editors)</a></li></ul></li></ul>



<p>0:00 – Intro<br />5:26 – Philosophy contributing to mind science<br />15:45 – Trend toward hyperspecialization<br />21:38 – Practice-focused philosophy of science<br />30:42 – Computationalism<br />33:05 – Philosophy of mind: identity theory, functionalism<br />38:18 – Computations as descriptions<br />41:27 – Pluralism and perspectivalism<br />54:18 – How much of brain function is computation?<br />1:02:11 – AI as computationalism<br />1:13:28 – Naturalizing representations<br />1:30:08 – Are you doing it right?</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/114-Mark-Sprevak-Mazviita-Chirimuuta-public.mp3" length="94498104"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Support the show to get full episodes and join the Discord community











Mark and Mazviita discuss the philosophy and science of mind, and how to think about computations with respect to understanding minds. Current approaches to explaining brain function are dominated by computational models and the computer metaphor for brain and mind. But there are alternative ways to think about the relation between computations and brain function, which we explore in the discussion. We also talk about the role of philosophy broadly and with respect to mind sciences, pluralism and perspectival approaches to truth and understanding, the prospects and desirability of naturalizing representations (accounting for how brain representations relate to the natural world), and much more.







Mark’s website.Mazviita’s University of Edinburgh page.Twitter (Mark): @msprevak.Mazviita’s previous Brain Inspired episode:BI 072 Mazviita Chirimuuta: Understanding, Prediction, and RealityThe related book we discuss:The Routledge Handbook of the Computational Mind 2018 Mark Sprevak Matteo Colombo (Editors)



0:00 – Intro5:26 – Philosophy contributing to mind science15:45 – Trend toward hyperspecialization21:38 – Practice-focused philosophy of science30:42 – Computationalism33:05 – Philosophy of mind: identity theory, functionalism38:18 – Computations as descriptions41:27 – Pluralism and perspectivalism54:18 – How much of brain function is computation?1:02:11 – AI as computationalism1:13:28 – Naturalizing representations1:30:08 – Are you doing it right?
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:38:07</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 113 David Barack and John Krakauer: Two Views On Cognition]]>
                </title>
                <pubDate>Sun, 12 Sep 2021 09:10:52 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-113-david-barack-and-john-krakauer-two-views-on-cognition</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-113-david-barack-and-john-krakauer-two-views-on-cognition</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/09/113-art-01.jpg" alt="" class="wp-image-1433" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/09/website_barack-150x150.jpg" alt="" class="wp-image-1436" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2019/01/JohnKrakauerMaster_hires-1-370x370-bw-150x150.jpg" alt="" class="wp-image-635" /></li></ul>



<p>David and John discuss some of the concepts from their recent paper <a href="https://www.nature.com/articles/s41583-021-00448-6">Two Views on the Cognitive Brain</a>, in which they argue the recent population-based dynamical systems approach is a promising route to understanding brain activity underpinning higher cognition. We discuss mental representations, the kinds of dynamical objects being used for explanation, and much more, including David’s perspectives as a practicing neuroscientist and philosopher.</p>



<div class="wp-block-image"><img width="800" height="535" src="https://braininspired.co/wp-content/uploads/2021/09/BI-Episode-113-100-kb.png" alt="" class="wp-image-1435" />“Juggling Philosophy and Neuroscience” by <a href="https://twitter.com/d4phn3c">Daphne Cornelisse</a>.</div>



<ul><li><a href="https://presidentialscholars.columbia.edu/directory/david-barack">David’s webpage.</a></li><li><a href="http://blam-lab.org">John’s Lab.</a></li><li>Twitter: <ul><li>David: <a href="https://twitter.com/dlbarack">@DLBarack</a></li><li>John: <a href="https://twitter.com/blamlab">@blamlab</a></li></ul></li><li>Paper: <a href="https://www.nature.com/articles/s41583-021-00448-6"><u>Two Views on the Cognitive Brain</u></a>.</li><li>John’s previous episodes:<ul><li><a href="https://braininspired.co/podcast/25/">BI 025 John Krakauer: Understanding Cognition</a></li><li><a href="https://braininspired.co/podcast/77/">BI 077 David and John Krakauer: Part 1</a></li><li><a href="https://braininspired.co/podcast/78/">BI 078 David and John Krakauer: Part 2</a></li></ul></li></ul>



<p>Timestamps</p>



<p>0:00 – Intro<br />3:13 – David’s philosophy and neuroscience experience<br />20:01 – Renaissance person<br />24:36 – John’s medical training <br />31:58 – Two Views on the Cognitive Brain<br />44:18 – Representation<br />49:37 – Studying populations of neurons<br />1:05:17 – What counts as representation<br />1:18:49 – Does this approach matter for AI?</p>



<p></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












David and John discuss some of the concepts from their recent paper Two Views on the Cognitive Brain, in which they argue the recent population-based dynamical systems approach is a promising route to understanding brain activity underpinning higher cognition. We discuss mental representations, the kinds of dynamical objects being used for explanation, and much more, including David’s perspectives as a practicing neuroscientist and philosopher.



“Juggling Philosophy and Neuroscience” by Daphne Cornelisse.



David’s webpage.John’s Lab.Twitter: David: @DLBarackJohn: @blamlabPaper: Two Views on the Cognitive Brain.John’s previous episodes:BI 025 John Krakauer: Understanding CognitionBI 077 David and John Krakauer: Part 1BI 078 David and John Krakauer: Part 2



Timestamps



0:00 – Intro3:13 – David’s philosophy and neuroscience experience20:01 – Renaissance person24:36 – John’s medical training 31:58 – Two Views on the Cognitive Brain44:18 – Representation49:37 – Studying populations of neurons1:05:17 – What counts as representation1:18:49 – Does this approach matter for AI?




]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 113 David Barack and John Krakauer: Two Views On Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/09/113-art-01.jpg" alt="" class="wp-image-1433" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2021/09/become_a_patron_button@2x.png" alt="" class="wp-image-1434" /></a></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/09/website_barack-150x150.jpg" alt="" class="wp-image-1436" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2019/01/JohnKrakauerMaster_hires-1-370x370-bw-150x150.jpg" alt="" class="wp-image-635" /></li></ul>



<p>David and John discuss some of the concepts from their recent paper <a href="https://www.nature.com/articles/s41583-021-00448-6">Two Views on the Cognitive Brain</a>, in which they argue the recent population-based dynamical systems approach is a promising route to understanding brain activity underpinning higher cognition. We discuss mental representations, the kinds of dynamical objects being used for explanation, and much more, including David’s perspectives as a practicing neuroscientist and philosopher.</p>



<div class="wp-block-image"><img width="800" height="535" src="https://braininspired.co/wp-content/uploads/2021/09/BI-Episode-113-100-kb.png" alt="" class="wp-image-1435" />“Juggling Philosophy and Neuroscience” by <a href="https://twitter.com/d4phn3c">Daphne Cornelisse</a>.</div>



<ul><li><a href="https://presidentialscholars.columbia.edu/directory/david-barack">David’s webpage.</a></li><li><a href="http://blam-lab.org">John’s Lab.</a></li><li>Twitter: <ul><li>David: <a href="https://twitter.com/dlbarack">@DLBarack</a></li><li>John: <a href="https://twitter.com/blamlab">@blamlab</a></li></ul></li><li>Paper: <a href="https://www.nature.com/articles/s41583-021-00448-6"><u>Two Views on the Cognitive Brain</u></a>.</li><li>John’s previous episodes:<ul><li><a href="https://braininspired.co/podcast/25/">BI 025 John Krakauer: Understanding Cognition</a></li><li><a href="https://braininspired.co/podcast/77/">BI 077 David and John Krakauer: Part 1</a></li><li><a href="https://braininspired.co/podcast/78/">BI 078 David and John Krakauer: Part 2</a></li></ul></li></ul>



<p>Timestamps</p>



<p>0:00 – Intro<br />3:13 – David’s philosophy and neuroscience experience<br />20:01 – Renaissance person<br />24:36 – John’s medical training <br />31:58 – Two Views on the Cognitive Brain<br />44:18 – Representation<br />49:37 – Studying populations of neurons<br />1:05:17 – What counts as representation<br />1:18:49 – Does this approach matter for AI?</p>



<p></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/112-Barack-and-Krakauer-public.mp3" length="87316082"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












David and John discuss some of the concepts from their recent paper Two Views on the Cognitive Brain, in which they argue the recent population-based dynamical systems approach is a promising route to understanding brain activity underpinning higher cognition. We discuss mental representations, the kinds of dynamical objects being used for explanation, and much more, including David’s perspectives as a practicing neuroscientist and philosopher.



“Juggling Philosophy and Neuroscience” by Daphne Cornelisse.



David’s webpage.John’s Lab.Twitter: David: @DLBarackJohn: @blamlabPaper: Two Views on the Cognitive Brain.John’s previous episodes:BI 025 John Krakauer: Understanding CognitionBI 077 David and John Krakauer: Part 1BI 078 David and John Krakauer: Part 2



Timestamps



0:00 – Intro3:13 – David’s philosophy and neuroscience experience20:01 – Renaissance person24:36 – John’s medical training 31:58 – Two Views on the Cognitive Brain44:18 – Representation49:37 – Studying populations of neurons1:05:17 – What counts as representation1:18:49 – Does this approach matter for AI?




]]>
                </itunes:summary>
                                                                            <itunes:duration>01:30:38</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI ViDA Panel Discussion: Deep RL and Dopamine]]>
                </title>
                <pubDate>Thu, 02 Sep 2021 08:55:50 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-vida-panel-discussion-deep-rl-and-dopamine</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-vida-panel-discussion-deep-rl-and-dopamine</link>
                                <description>
                                            <![CDATA[
[et_pb_section fb_built=”1″ admin_label=”Header” _builder_version=”4.9.2″ background_color=”#ad876d” background_enable_image=”off” parallax=”on” custom_padding=”0vw||0vw||true|false” custom_css_main_element=”.podcast .entry-title {||display: none;||}” background_size__hover=”cover” background_size__hover_enabled=”cover”][et_pb_row _builder_version=”4.9.2″ background_color=”#d5a570″ use_background_color_gradient=”on” background_color_gradient_start=”rgba(26,24,68,0)” background_color_gradient_end=”#231f20″ background_color_gradient_overlays_image=”on” background_enable_image=”off” background_position=”top_center” width=”100%” max_width=”100%” min_height=”940px” custom_margin=”||” custom_padding=”4vw|10%|4vw|10%|true|true” animation_style=”slide” animation_direction=”bottom” animation_intensity_slide=”1%” use_custom_width=”on” width_unit=”off” custom_width_percent=”100%”][et_pb_column type=”4_4″ _builder_version=”3.25″ custom_padding=”|||” custom_padding__hover=”|||”][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”56px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone”]<h4><span style="color:#0f1424;">BI ViDA Panel:</span></h4>
<h1><span style="color:#0f1424;"><strong>Deep RL and Dopamine</strong></span></h1>[/et_pb_text][et_pb_post_title title=”off” meta=”off” _builder_version=”4.9.2″ meta_font=”|700|||||||” meta_text_color=”#fe4943″ text_orientation=”center”][/et_pb_post_title][et_pb_image src=”https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png” title_text=”become_a_patron_button@2x” url=”https://www.patreon.com/braininspired” url_new_window=”on” align=”center” _builder_version=”4.9.2″ _module_preset=”default”][/et_pb_image][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”82px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” hover_enabled=”0″ text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone” sticky_enabled=”0″]<p style="text-align:left;"><span>This is a panel discussion at the </span><a href="http://www.vidaconference.com/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span>Virtual Dopamine Conference</span></a><span> 2021 (last episode </span><a href="https://braininspired.co/podcast/112/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span>Ali Mohebi and Ben Engelhard</span></a><span> discussed dopamine more broadly).</span></p>
<p style="text-align:left;"><span>Before the panel, <a href="https://www.youtube.com/watch?v=KvFDAAIvlHo" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan">Matt Botvinick gave a talk</a> about how deep reinforcement learning is doing amazing things and helping drive our understanding of brain function.</span></p>[/et_pb_text][et_pb_image src=”https://braininspired.co/wp-content/uploads/2021/09/ViDA-artArtboard-3.png” _builder_version=”4.9.2″ _module_preset=”default” title_text=”ViDA artArtboard 3″ hover_enabled=”0″ sticky_enabled=”0″][/et_pb_image][et_pb_text _builder_version=”4.9.2″ _module_preset=”default” hover_ena...]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
[et_pb_section fb_built=”1″ admin_label=”Header” _builder_version=”4.9.2″ background_color=”#ad876d” background_enable_image=”off” parallax=”on” custom_padding=”0vw||0vw||true|false” custom_css_main_element=”.podcast .entry-title {||display: none;||}” background_size__hover=”cover” background_size__hover_enabled=”cover”][et_pb_row _builder_version=”4.9.2″ background_color=”#d5a570″ use_background_color_gradient=”on” background_color_gradient_start=”rgba(26,24,68,0)” background_color_gradient_end=”#231f20″ background_color_gradient_overlays_image=”on” background_enable_image=”off” background_position=”top_center” width=”100%” max_width=”100%” min_height=”940px” custom_margin=”||” custom_padding=”4vw|10%|4vw|10%|true|true” animation_style=”slide” animation_direction=”bottom” animation_intensity_slide=”1%” use_custom_width=”on” width_unit=”off” custom_width_percent=”100%”][et_pb_column type=”4_4″ _builder_version=”3.25″ custom_padding=”|||” custom_padding__hover=”|||”][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”56px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone”]BI ViDA Panel:
Deep RL and Dopamine[/et_pb_text][et_pb_post_title title=”off” meta=”off” _builder_version=”4.9.2″ meta_font=”|700|||||||” meta_text_color=”#fe4943″ text_orientation=”center”][/et_pb_post_title][et_pb_image src=”https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png” title_text=”become_a_patron_button@2x” url=”https://www.patreon.com/braininspired” url_new_window=”on” align=”center” _builder_version=”4.9.2″ _module_preset=”default”][/et_pb_image][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”82px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” hover_enabled=”0″ text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone” sticky_enabled=”0″]This is a panel discussion at the Virtual Dopamine Conference 2021 (last episode Ali Mohebi and Ben Engelhard discussed dopamine more broadly).
Before the panel, Matt Botvinick gave a talk about how deep reinforcement learning is doing amazing things and helping drive our understanding of brain function.[/et_pb_text][et_pb_image src=”https://braininspired.co/wp-content/uploads/2021/09/ViDA-artArtboard-3.png” _builder_version=”4.9.2″ _module_preset=”default” title_text=”ViDA artArtboard 3″ hover_enabled=”0″ sticky_enabled=”0″][/et_pb_image][et_pb_text _builder_version=”4.9.2″ _module_preset=”default” hover_ena...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI ViDA Panel Discussion: Deep RL and Dopamine]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
[et_pb_section fb_built=”1″ admin_label=”Header” _builder_version=”4.9.2″ background_color=”#ad876d” background_enable_image=”off” parallax=”on” custom_padding=”0vw||0vw||true|false” custom_css_main_element=”.podcast .entry-title {||display: none;||}” background_size__hover=”cover” background_size__hover_enabled=”cover”][et_pb_row _builder_version=”4.9.2″ background_color=”#d5a570″ use_background_color_gradient=”on” background_color_gradient_start=”rgba(26,24,68,0)” background_color_gradient_end=”#231f20″ background_color_gradient_overlays_image=”on” background_enable_image=”off” background_position=”top_center” width=”100%” max_width=”100%” min_height=”940px” custom_margin=”||” custom_padding=”4vw|10%|4vw|10%|true|true” animation_style=”slide” animation_direction=”bottom” animation_intensity_slide=”1%” use_custom_width=”on” width_unit=”off” custom_width_percent=”100%”][et_pb_column type=”4_4″ _builder_version=”3.25″ custom_padding=”|||” custom_padding__hover=”|||”][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”56px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone”]<h4><span style="color:#0f1424;">BI ViDA Panel:</span></h4>
<h1><span style="color:#0f1424;"><strong>Deep RL and Dopamine</strong></span></h1>[/et_pb_text][et_pb_post_title title=”off” meta=”off” _builder_version=”4.9.2″ meta_font=”|700|||||||” meta_text_color=”#fe4943″ text_orientation=”center”][/et_pb_post_title][et_pb_image src=”https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png” title_text=”become_a_patron_button@2x” url=”https://www.patreon.com/braininspired” url_new_window=”on” align=”center” _builder_version=”4.9.2″ _module_preset=”default”][/et_pb_image][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”82px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” hover_enabled=”0″ text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone” sticky_enabled=”0″]<p style="text-align:left;"><span>This is a panel discussion at the </span><a href="http://www.vidaconference.com/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span>Virtual Dopamine Conference</span></a><span> 2021 (last episode </span><a href="https://braininspired.co/podcast/112/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span>Ali Mohebi and Ben Engelhard</span></a><span> discussed dopamine more broadly).</span></p>
<p style="text-align:left;"><span>Before the panel, <a href="https://www.youtube.com/watch?v=KvFDAAIvlHo" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan">Matt Botvinick gave a talk</a> about how deep reinforcement learning is doing amazing things and helping drive our understanding of brain function.</span></p>[/et_pb_text][et_pb_image src=”https://braininspired.co/wp-content/uploads/2021/09/ViDA-artArtboard-3.png” _builder_version=”4.9.2″ _module_preset=”default” title_text=”ViDA artArtboard 3″ hover_enabled=”0″ sticky_enabled=”0″][/et_pb_image][et_pb_text _builder_version=”4.9.2″ _module_preset=”default” hover_enabled=”0″ sticky_enabled=”0″]<p style="text-align:center;"><span style="color:#999999;">Different views on dopamine, deep RL, and the brain</span></p>
<p style="text-align:center;"><span style="color:#999999;">by <a href="https://twitter.com/d4phn3c" style="color:#999999;">Daphne Cornelisse</a>.</span></p>[/et_pb_text][et_pb_text _builder_version=”4.9.2″ _module_preset=”default” hover_enabled=”0″ sticky_enabled=”0″]<div class="_2TO-components-SimpleRichTextEditor--paragraphElement">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><span style="color:#ffffff;"><strong>What can artificial intelligence teach us about how the brain uses dopamine to learn?</strong></span></div>
</div>
<div class="_2TO-components-SimpleRichTextEditor--paragraphElement">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><span style="color:#ffffff;">Recent advances in artificial intelligence have yielded novel algorithms for reinforcement learning (RL), which leverage the power of deep learning together with reward prediction error signals in order to achieve unprecedented performance in complex tasks. In the brain, reward prediction error signals are thought to be signaled by midbrain dopamine neurons and support learning. Can these new advances in Deep RL help us understand the role that dopamine plays in learning? In this panel experts in both theoretical and experimental dopamine research will discuss this question.</span></div>
</div>
<div class="_2TO-components-SimpleRichTextEditor--paragraphElement"></div>[/et_pb_text][/et_pb_column][/et_pb_row][/et_pb_section][et_pb_section fb_built=”1″ admin_label=”Episode Details” _builder_version=”4.9.2″ background_color=”#d5a570″ background_image=”https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png” parallax=”on” custom_padding=”0px|||||” background_size__hover=”cover” background_size__hover_enabled=”cover”][et_pb_row _builder_version=”4.9.2″ custom_padding=”47px|||||” custom_width_px__hover=”1080px” custom_width_px__hover_enabled=”1080px” custom_width_percent__hover=”80%” custom_width_percent__hover_enabled=”80%” use_custom_width__hover=”off” use_custom_width__hover_enabled=”off” width_unit__hover=”on” width_unit__hover_enabled=”on” gutter_width__hover=”3″ gutter_width__hover_enabled=”3″ parallax_1__hover=”off” parallax_1__hover_enabled=”off” parallax_2__hover=”off” parallax_2__hover_enabled=”off” parallax_3__hover=”off” parallax_3__hover_enabled=”off” parallax_4__hover=”off” parallax_4__hover_enabled=”off” parallax_5__hover=”off” parallax_5__hover_enabled=”off” parallax_6__hover=”off” parallax_6__hover_enabled=”off” parallax_method_1__hover=”on” parallax_method_1__hover_enabled=”on” parallax_method_2__hover=”on” parallax_method_2__hover_enabled=”on” parallax_method_3__hover=”on” parallax_method_3__hover_enabled=”on” parallax_method_4__hover=”on” parallax_method_4__hover_enabled=”on” parallax_method_5__hover=”on” parallax_method_5__hover_enabled=”on” parallax_method_6__hover=”on” parallax_method_6__hover_enabled=”on” use_background_color_gradient__hover=”off” use_background_color_gradient__hover_enabled=”off” background_color_gradient_start__hover=”#2b87da” background_color_gradient_start__hover_enabled=”#2b87da” background_color_gradient_end__hover=”#29c4a9″ background_color_gradient_end__hover_enabled=”#29c4a9″ background_color_gradient_overlays_image__hover=”off” background_color_gradient_overlays_image__hover_enabled=”off” 
background_position__hover=”center” background_position__hover_enabled=”center” animation_style__hover=”none” animation_style__hover_enabled=”none” animation_direction__hover=”center” animation_direction__hover_enabled=”center” animation_intensity_slide__hover=”50%” animation_intensity_slide__hover_enabled=”50%” background_color_gradient_type__hover=”linear” background_color_gradient_type__hover_enabled=”linear” background_color_gradient_direction__hover=”180deg” background_color_gradient_direction__hover_enabled=”180deg” background_color_gradient_direction_radial__hover=”center” background_color_gradient_direction_radial__hover_enabled=”center” background_color_gradient_start_position__hover=”0%” background_color_gradient_start_position__hover_enabled=”0%” background_color_gradient_end_position__hover=”100%” background_color_gradient_end_position__hover_enabled=”100%” parallax__hover=”off” parallax__hover_enabled=”off” parallax_method__hover=”on” parallax_method__hover_enabled=”on” background_size__hover=”cover” background_size__hover_enabled=”cover” background_repeat__hover=”no-repeat” background_repeat__hover_enabled=”no-repeat” background_blend__hover=”normal” background_blend__hover_enabled=”normal” allow_player_pause__hover=”off” allow_player_pause__hover_enabled=”off” background_video_pause_outside_viewport__hover=”on” background_video_pause_outside_viewport__hover_enabled=”on” make_fullwidth__hover=”off” make_fullwidth__hover_enabled=”off” use_custom_gutter__hover=”off” use_custom_gutter__hover_enabled=”off” make_equal__hover=”off” make_equal__hover_enabled=”off” border_radii__hover=”on||||” border_radii__hover_enabled=”on||||” box_shadow_style__hover=”none” box_shadow_style__hover_enabled=”none” box_shadow_color__hover=”rgba(0,0,0,0.3)” box_shadow_color__hover_enabled=”rgba(0,0,0,0.3)” filter_hue_rotate__hover=”0deg” filter_hue_rotate__hover_enabled=”0deg” filter_saturate__hover=”100%” filter_saturate__hover_enabled=”100%” filter_brightness__hover=”100%” 
filter_brightness__hover_enabled=”100%” filter_contrast__hover=”100%” filter_contrast__hover_enabled=”100%” filter_invert__hover=”0%” filter_invert__hover_enabled=”0%” filter_sepia__hover=”0%” filter_sepia__hover_enabled=”0%” filter_opacity__hover=”100%” filter_opacity__hover_enabled=”100%” filter_blur__hover=”0px” filter_blur__hover_enabled=”0px” mix_blend_mode__hover=”normal” mix_blend_mode__hover_enabled=”normal” animation_repeat__hover=”once” animation_repeat__hover_enabled=”once” animation_duration__hover=”1000ms” animation_duration__hover_enabled=”1000ms” animation_delay__hover=”0ms” animation_delay__hover_enabled=”0ms” animation_intensity_zoom__hover=”50%” animation_intensity_zoom__hover_enabled=”50%” animation_intensity_flip__hover=”50%” animation_intensity_flip__hover_enabled=”50%” animation_intensity_fold__hover=”50%” animation_intensity_fold__hover_enabled=”50%” animation_intensity_roll__hover=”50%” animation_intensity_roll__hover_enabled=”50%” animation_starting_opacity__hover=”0%” animation_starting_opacity__hover_enabled=”0%” animation_speed_curve__hover=”ease-in-out” animation_speed_curve__hover_enabled=”ease-in-out” hover_transition_duration__hover=”300ms” hover_transition_duration__hover_enabled=”300ms” hover_transition_delay__hover=”0ms” hover_transition_delay__hover_enabled=”0ms” hover_transition_speed_curve__hover=”ease” hover_transition_speed_curve__hover_enabled=”ease”][et_pb_column type=”4_4″ _builder_version=”4.9.2″ _module_preset=”default”][et_pb_text _builder_version=”4.9.2″ text_text_color=”#0f1424″ text_line_height=”2em” link_font=”||||||||” link_text_color=”#0f1424″ header_text_color=”#0f1424″ header_2_text_color=”#0f1424″ header_3_text_color=”#0f1424″ header_4_text_color=”#0f1424″ hover_enabled=”0″ sticky_enabled=”0″]<p></p>
<h3>Panel</h3>
<p><a href="https://psych.la.psu.edu/directory/bpw10"></a></p>
<ul>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-reset public-DraftStyleDefault-depth0 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://hai.stanford.edu/people/matthew-botvinick" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>Matt Botvinick</span></span></a></div>
<ul>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-reset public-DraftStyleDefault-depth1 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://braininspired.co/podcast/21/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>BI 021 Matt Botvinick: Neuroscience and AI at DeepMind</span></span></a></div>
</li>
</ul>
</li>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-depth0 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://www.momen-nejad.org/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>Ida Momennejad</span></span></a></div>
<ul>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-reset public-DraftStyleDefault-depth1 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://twitter.com/criticalneuro" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>@criticalneuro</span></span></a></div>
</li>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-depth1 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://braininspired.co/podcast/85/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>BI 085 Ida Momennejad: Learning Representations</span></span></a></div>
</li>
</ul>
</li>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-depth0 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="http://lk.zuckermaninstitute.columbia.edu/index.shtml" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>Ashok Litwin-Kumar</span></span></a></div>
</li>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-depth0 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://wittenlab.org/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>Ilana Witten</span></span></a></div>
<ul>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-reset public-DraftStyleDefault-depth1 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://twitter.com/IlanaWitten" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>@IlanaWitten</span></span></a></div>
</li>
</ul>
</li>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-depth0 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://www.laklab.org/" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>Armin Lak</span></span></a></div>
<ul>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-reset public-DraftStyleDefault-depth1 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><a href="https://twitter.com/ArminLak" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>@ArminLak</span></span></a></div>
</li>
</ul>
</li>
<li class="public-DraftStyleDefault-unorderedListItem public-DraftStyleDefault-depth0 public-DraftStyleDefault-listLTR">
<div class="public-DraftStyleDefault-block public-DraftStyleDefault-ltr"><span><span>Co-moderated by Tim Krausz (PhD student in Josh </span></span><a href="https://berkelab.org/index.html" class="_4X_-components-SimpleRichTextEditor-components-LinkSpan--linkSpan"><span><span>Berke’s lab</span></span></a><span><span>).</span></span></div>
</li>
</ul>

<p></p>[/et_pb_text][et_pb_text _builder_version=”4.9.2″ _module_preset=”default” hover_enabled=”0″ sticky_enabled=”0″]<div>Timestamps:</div>
<div>0:00 – Intro</div>
<div>4:45 – Armin Lak</div>
<div>6:50 – Ashok Litwin-Kumar</div>
<div>8:28 – Ilana Witten</div>
<div>10:15 – Ida Momennejad</div>
<div>11:57 – Is reward enough?</div>
<div>14:56 – Can AI lead neuroscience astray?</div>
<div>25:45 – Other roles for dopamine</div>
<div>31:33 – Does structure matter?</div>
<div>44:30 – Non-brain-like solutions in AI</div>
<div>48:03 – What’s missing in deep RL for brains?</div>
<p> </p>[/et_pb_text][/et_pb_column][/et_pb_row][/et_pb_section]
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ViDA-Panel.mp3" length="55418977"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
[et_pb_section fb_built=”1″ admin_label=”Header” _builder_version=”4.9.2″ background_color=”#ad876d” background_enable_image=”off” parallax=”on” custom_padding=”0vw||0vw||true|false” custom_css_main_element=”.podcast .entry-title {||display: none;||}” background_size__hover=”cover” background_size__hover_enabled=”cover”][et_pb_row _builder_version=”4.9.2″ background_color=”#d5a570″ use_background_color_gradient=”on” background_color_gradient_start=”rgba(26,24,68,0)” background_color_gradient_end=”#231f20″ background_color_gradient_overlays_image=”on” background_enable_image=”off” background_position=”top_center” width=”100%” max_width=”100%” min_height=”940px” custom_margin=”||” custom_padding=”4vw|10%|4vw|10%|true|true” animation_style=”slide” animation_direction=”bottom” animation_intensity_slide=”1%” use_custom_width=”on” width_unit=”off” custom_width_percent=”100%”][et_pb_column type=”4_4″ _builder_version=”3.25″ custom_padding=”|||” custom_padding__hover=”|||”][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”56px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone”]BI ViDA Panel:
Deep RL and Dopamine[/et_pb_text][et_pb_post_title title=”off” meta=”off” _builder_version=”4.9.2″ meta_font=”|700|||||||” meta_text_color=”#fe4943″ text_orientation=”center”][/et_pb_post_title][et_pb_image src=”https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png” title_text=”become_a_patron_button@2x” url=”https://www.patreon.com/braininspired” url_new_window=”on” align=”center” _builder_version=”4.9.2″ _module_preset=”default”][/et_pb_image][et_pb_text _builder_version=”4.9.2″ text_font=”||||||||” text_font_size=”20px” text_line_height=”1.8em” header_font=”Lato|900|||||||” header_font_size=”82px” header_letter_spacing=”1px” header_line_height=”1.2em” header_4_font=”Raleway|||on|||||” header_4_font_size=”22px” header_4_letter_spacing=”4px” header_4_line_height=”1.6em” text_orientation=”center” background_layout=”dark” max_width=”800px” module_alignment=”center” hover_enabled=”0″ text_font_size_tablet=”” text_font_size_phone=”14px” text_font_size_last_edited=”on|desktop” header_font_size_tablet=”62px” header_font_size_phone=”38px” header_font_size_last_edited=”on|phone” sticky_enabled=”0″]This is a panel discussion at the Virtual Dopamine Conference 2021 (last episode Ali Mohebi and Ben Engelhard discussed dopamine more broadly).
Before the panel, Matt Botvinick gave a talk about how deep reinforcement learning is doing amazing things and helping drive our understanding of brain function.[/et_pb_text][et_pb_image src=”https://braininspired.co/wp-content/uploads/2021/09/ViDA-artArtboard-3.png” _builder_version=”4.9.2″ _module_preset=”default” title_text=”ViDA artArtboard 3″ hover_enabled=”0″ sticky_enabled=”0″][/et_pb_image][et_pb_text _builder_version=”4.9.2″ _module_preset=”default” hover_ena...]]>
                </itunes:summary>
                                                                            <itunes:duration>00:57:25</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 112 Ali Mohebi and Ben Engelhard: The Many Faces of Dopamine]]>
                </title>
                <pubDate>Thu, 26 Aug 2021 15:42:35 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-112-ali-mohebi-and-ben-engelhard-the-many-faces-of-dopamine</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-112-ali-mohebi-and-ben-engelhard-the-many-faces-of-dopamine</link>
                                <description>
                                            <![CDATA[<div class="et_pb_section et_pb_section_0 et_pb_with_background et_pb_section_parallax et_section_regular">
				
				
				
				
					<div class="et_pb_row et_pb_row_0">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_0 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_0 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><h4><span style="color:#0f1424;">BI 112:</span></h4>
<h2><span style="color:#0f1424;">Ali Mohebi and Ben Engelhard</span></h2>
<h1><span style="color:#0f1424;"><strong>The Many Faces of Dopamine</strong></span></h1></div>
			</div> <div class="et_pb_with_border et_pb_module et_pb_text et_pb_text_1 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><h2 style="text-align:center;">Announcement:</h2>
<p style="text-align:center;"><strong>Ben has started his new lab and is recruiting grad students. </strong></p>
<p style="text-align:center;"><strong>Check out his lab here and apply!</strong></p>
<h2 style="text-align:center;"><span style="color:#ccffff;"><a href="https://engelhardlab.com/" style="color:#ccffff;">Engelhard Lab</a></span></h2>
<p> </p></div>
			</div> <div class="et_pb_module et_pb_post_title et_pb_post_title_0 et_pb_bg_layout_light et_pb_text_align_center">
				
				
				
				<div class="et_pb_title_container">
					
				</div>
				
			</div><div class="et_pb_module et_pb_image et_pb_image_0">
				
				
				<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" title="become_a_patron_button@2x" class="wp-image-585" /></span></a>
			</div><div class="et_pb_module et_pb_text et_pb_text_2 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><div style="text-align:left;">Ali and Ben discuss the ever-expanding discoveries about the roles dopamine plays for our cognition. Dopamine is known to play a role in learning – dopamine (DA) neurons fire when our reward expectations aren’t met, and that signal helps adjust our expectation. Roughly, DA corresponds to a reward prediction error. The reward prediction error has helped reinforcement learning in AI develop into a raging success, especially with deep reinforcement learning models trained to out-perform humans in games like chess and Go. But DA likely contributes a lot more to brain function. We discuss many of those possible roles, how to think about computation with respect to neuromodulators like DA, how different time and spatial scales interact, and more.</div></div>
			</div> <div class="et_pb_module et_pb_image et_pb_image_1">
				
				
				<span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2021/08/Dopamine-conference-03-1.jpg" alt="" title="Dopamine conference-03" class="wp-image-1421" /></span>
			</div><div class="et_pb_module et_pb_text et_pb_text_3 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><p style="text-align:center;"><span style="color:#999999;">Dopamine: A Simple <em>AND</em> Complex Story </span></p>
<p style="text-align:center;"><span style="color:#999999;">by </span><span style="color:#ffffff;"><a href="https://twitter.com/d4phn3c" style="color:#ffffff;">Daphne Cornelisse</a></span></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> <div class="et_pb_section et_pb_section_1 et_pb_section_parallax et_pb_with_background et_section_regular">
				
				<div class="et_parallax_bg_wrap"><div class="et_parallax_bg" style="background-image:url(&quot;https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png&quot;);"></div></div>
				
				
					<div class="et_pb_row et_pb_row_1 et_hover_enabled">
				<div class="et_pb_column et_pb_column_1_2 et_pb_column_1 et_pb_css_mix_blend_mode_passthr...&lt;/div&gt;&lt;/body&gt;&lt;/html&gt;"></div></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
				
				
				
				
					
				
				
				
				
				
				
				BI 112:
Ali Mohebi and Ben Engelhard
The Many Faces of Dopamine
			 
				
				
				Announcement:
Ben has started his new lab and is recruiting grad students. 
Check out his lab here and apply!
Engelhard Lab
 
			 
				
				
				
				
					
				
				
			
				
				
				
			
				
				
				Ali and Ben discuss the ever-expanding discoveries about the roles dopamine plays for our cognition. Dopamine is known to play a role in learning – dopamine (DA) neurons fire when our reward expectations aren’t met, and that signal helps adjust our expectation. Roughly, DA corresponds to a reward prediction error. The reward prediction error has helped reinforcement learning in AI develop into a raging success, especially with deep reinforcement learning models trained to out-perform humans in games like chess and Go. But DA likely contributes a lot more to brain function. We discuss many of those possible roles, how to think about computation with respect to neuromodulators like DA, how different time and spatial scales interact, and more.
			 
				
				
				
			
				
				
				Dopamine: A Simple AND Complex Story 
by Daphne Cornelisse
			 
			 
				
				
			 
				
				
			 
				
				
				
				
					
				]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 112 Ali Mohebi and Ben Engelhard: The Many Faces of Dopamine]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<div class="et_pb_section et_pb_section_0 et_pb_with_background et_pb_section_parallax et_section_regular">
				
				
				
				
					<div class="et_pb_row et_pb_row_0">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_0 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_0 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><h4><span style="color:#0f1424;">BI 112:</span></h4>
<h2><span style="color:#0f1424;">Ali Mohebi and Ben Engelhard</span></h2>
<h1><span style="color:#0f1424;"><strong>The Many Faces of Dopamine</strong></span></h1></div>
			</div> <div class="et_pb_with_border et_pb_module et_pb_text et_pb_text_1 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><h2 style="text-align:center;">Announcement:</h2>
<p style="text-align:center;"><strong>Ben has started his new lab and is recruiting grad students. </strong></p>
<p style="text-align:center;"><strong>Check out his lab here and apply!</strong></p>
<h2 style="text-align:center;"><span style="color:#ccffff;"><a href="https://engelhardlab.com/" style="color:#ccffff;">Engelhard Lab</a></span></h2>
<p> </p></div>
			</div> <div class="et_pb_module et_pb_post_title et_pb_post_title_0 et_pb_bg_layout_light et_pb_text_align_center">
				
				
				
				<div class="et_pb_title_container">
					
				</div>
				
			</div><div class="et_pb_module et_pb_image et_pb_image_0">
				
				
				<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" title="become_a_patron_button@2x" class="wp-image-585" /></span></a>
			</div><div class="et_pb_module et_pb_text et_pb_text_2 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><div style="text-align:left;">Ali and Ben discuss the ever-expanding discoveries about the roles dopamine plays for our cognition. Dopamine is known to play a role in learning – dopamine (DA) neurons fire when our reward expectations aren’t met, and that signal helps adjust our expectation. Roughly, DA corresponds to a reward prediction error. The reward prediction error has helped reinforcement learning in AI develop into a raging success, especially with deep reinforcement learning models trained to outperform humans in games like chess and Go. But DA likely contributes a lot more to brain function. We discuss many of those possible roles, how to think about computation with respect to neuromodulators like DA, how different time and spatial scales interact, and more.</div></div>
			</div> <div class="et_pb_module et_pb_image et_pb_image_1">
				
				
				<span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2021/08/Dopamine-conference-03-1.jpg" alt="" title="Dopamine conference-03" class="wp-image-1421" /></span>
			</div><div class="et_pb_module et_pb_text et_pb_text_3 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><p style="text-align:center;"><span style="color:#999999;">Dopamine: A Simple <em>AND</em> Complex Story </span></p>
<p style="text-align:center;"><span style="color:#999999;">by </span><span style="color:#ffffff;"><a href="https://twitter.com/d4phn3c" style="color:#ffffff;">Daphne Cornelisse</a></span></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> <div class="et_pb_section et_pb_section_1 et_pb_section_parallax et_pb_with_background et_section_regular">
				
				<div class="et_parallax_bg_wrap"><div class="et_parallax_bg" style="background-image:url(&quot;https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png&quot;);"></div></div>
				
				
					<div class="et_pb_row et_pb_row_1 et_hover_enabled">
				<div class="et_pb_column et_pb_column_1_2 et_pb_column_1 et_pb_css_mix_blend_mode_passthrough">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_4 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><p></p>
<h3>Guests </h3>
<ul>
<li>
<div><a href="https://mohebial.com/index.html">Ali Mohebi</a></div>
<ul>
<li>
<div><a href="https://twitter.com/mohebial">@mohebial </a></div>
</li>
</ul>
</li>
<li>
<div><a href="https://engelhardlab.com/">Ben Engelhard</a></div>
</li>
</ul>

<p></p></div>
			</div> <div class="et_pb_module et_pb_text et_pb_text_5 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><p><span style="color:#0f1424;">Timestamps:</span></p>
<p><span style="color:#0f1424;"></span></p>
<div>0:00 – Intro</div>
<div>5:02 – Virtual Dopamine Conference</div>
<div>9:56 – History of dopamine’s roles</div>
<div>16:47 – Dopamine circuits</div>
<div>21:13 – Multiple roles for dopamine</div>
<div>31:43 – Deep learning panel discussion</div>
<div>50:14 – Computation and neuromodulation</div></div>
			</div> 
			</div> <div class="et_pb_column et_pb_column_1_2 et_pb_column_2 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_image et_pb_image_2">
				
				
				<span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2021/08/ali.jpg" alt="" title="ali" class="wp-image-1416" /></span>
			</div><div class="et_pb_module et_pb_image et_pb_image_3">
				
				
				<span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2021/08/ben2.jpg" alt="" title="ben2" class="wp-image-1417" /></span>
			</div>
			</div> 
				
				
			</div> 
				
				
			</div> 
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/ViDA-Ali-Mohebi-and-Ben-Engelhard.mp3" length="71279990"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
				
				
				
				
					
				
				
				
				
				
				
				BI 112:
Ali Mohebi and Ben Engelhard
The Many Faces of Dopamine
			 
				
				
				Announcement:
Ben has started his new lab and is recruiting grad students. 
Check out his lab here and apply!
Engelhard Lab
 
			 
				
				
				
				
					
				
				
			
				
				
				
			
				
				
				Ali and Ben discuss the ever-expanding discoveries about the roles dopamine plays for our cognition. Dopamine is known to play a role in learning – dopamine (DA) neurons fire when our reward expectations aren’t met, and that signal helps adjust our expectation. Roughly, DA corresponds to a reward prediction error. The reward prediction error has helped reinforcement learning in AI develop into a raging success, especially with deep reinforcement learning models trained to outperform humans in games like chess and Go. But DA likely contributes a lot more to brain function. We discuss many of those possible roles, how to think about computation with respect to neuromodulators like DA, how different time and spatial scales interact, and more.
			 
				
				
				
			
				
				
				Dopamine: A Simple AND Complex Story 
by Daphne Cornelisse
			 
			 
				
				
			 
				
				
			 
				
				
				
				
					
				]]>
                </itunes:summary>
                                                                            <itunes:duration>01:13:56</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI NMA 06: Advancing Neuro Deep Learning Panel]]>
                </title>
                <pubDate>Thu, 19 Aug 2021 07:48:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-nma-06-advancing-neuro-deep-learning-panel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-nma-06-advancing-neuro-deep-learning-panel</link>
                                <description>
                                            <![CDATA[<p>This is the 6th in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home">Neuromatch Academy</a>, the online computational neuroscience summer school. This is the 3rd of 3 in the deep learning series. In this episode, the panelists discuss their experiences with advanced topics in deep learning; unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</p>
<p> </p>
<h3><span style="color:#000000;">Guests</span> </h3>
<ul>
<li>
<div><a href="http://webdocs.cs.ualberta.ca/~alona/">Alona Fyshe</a>.</div>
<ul>
<li>
<div><a href="https://twitter.com/alonamarie">@alonamarie</a>.</div>
</li>
</ul>
</li>
<li>
<div><a href="http://www.janexwang.com/">Jane Wang</a>.</div>
<ul>
<li>
<div><a href="http://www.twitter.com/janexwang/">@janexwang</a>.</div>
</li>
<li>
<div><a href="https://braininspired.co/podcast/83/">BI 083 Jane Wang: Evolving Altruism in AI</a>.</div>
</li>
</ul>
</li>
<li>
<div><a href="https://www.momen-nejad.org/">Ida Momennejad</a>.</div>
<ul>
<li>
<div> <a href="https://twitter.com/criticalneuro">@criticalneuro.</a></div>
</li>
<li>
<div><a href="https://braininspired.co/podcast/85/">BI 085 Ida Momennejad: Learning Representations</a>.</div>
</li>
</ul>
</li>
</ul>
<h4><span style="font-size:18px;">The other panels:</span> </h4>
<ul>
<li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li>
<li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li>
<li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li>
<li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li>
<li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li>
</ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[This is the 6th in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. This is the 3rd of 3 in the deep learning series. In this episode, the panelists discuss their experiences with advanced topics in deep learning; unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.
 
Guests 


Alona Fyshe.


@alonamarie.




Jane Wang.


@janexwang.


BI 083 Jane Wang: Evolving Altruism in AI.




Ida Momennejad.


 @criticalneuro.


BI 085 Ida Momennejad: Learning Representations.




The other panels: 

First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.
Second panel, about linear systems, real neurons, and dynamic networks.
Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.
Fourth panel, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
Fifth panel, about “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI NMA 06: Advancing Neuro Deep Learning Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p>This is the 6th in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home">Neuromatch Academy</a>, the online computational neuroscience summer school. This is the 3rd of 3 in the deep learning series. In this episode, the panelists discuss their experiences with advanced topics in deep learning; unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</p>
<p> </p>
<h3><span style="color:#000000;">Guests</span> </h3>
<ul>
<li>
<div><a href="http://webdocs.cs.ualberta.ca/~alona/">Alona Fyshe</a>.</div>
<ul>
<li>
<div><a href="https://twitter.com/alonamarie">@alonamarie</a>.</div>
</li>
</ul>
</li>
<li>
<div><a href="http://www.janexwang.com/">Jane Wang</a>.</div>
<ul>
<li>
<div><a href="http://www.twitter.com/janexwang/">@janexwang</a>.</div>
</li>
<li>
<div><a href="https://braininspired.co/podcast/83/">BI 083 Jane Wang: Evolving Altruism in AI</a>.</div>
</li>
</ul>
</li>
<li>
<div><a href="https://www.momen-nejad.org/">Ida Momennejad</a>.</div>
<ul>
<li>
<div> <a href="https://twitter.com/criticalneuro">@criticalneuro.</a></div>
</li>
<li>
<div><a href="https://braininspired.co/podcast/85/">BI 085 Ida Momennejad: Learning Representations</a>.</div>
</li>
</ul>
</li>
</ul>
<h4><span style="font-size:18px;">The other panels:</span> </h4>
<ul>
<li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li>
<li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li>
<li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li>
<li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li>
<li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li>
</ul>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/nma-06.mp3" length="77609938"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[This is the 6th in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. This is the 3rd of 3 in the deep learning series. In this episode, the panelists discuss their experiences with advanced topics in deep learning; unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.
 
Guests 


Alona Fyshe.


@alonamarie.




Jane Wang.


@janexwang.


BI 083 Jane Wang: Evolving Altruism in AI.




Ida Momennejad.


 @criticalneuro.


BI 085 Ida Momennejad: Learning Representations.




The other panels: 

First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.
Second panel, about linear systems, real neurons, and dynamic networks.
Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.
Fourth panel, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
Fifth panel, about “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:20:32</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI NMA 05: NLP and Generative Models Panel]]>
                </title>
                <pubDate>Fri, 13 Aug 2021 08:11:40 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-nma-05-nlp-and-generative-models-panel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-nma-05-nlp-and-generative-models-panel</link>
                                <description>
                                            <![CDATA[<div class="et_pb_section et_pb_section_0 et_pb_with_background et_pb_section_parallax et_section_regular">
				
				
				
				
					<div class="et_pb_row et_pb_row_0">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_0 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_0 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><h4><span style="color:#0f1424;">BI NMA 05:</span></h4>
<h1><span style="color:#0f1424;"><strong>NLP and Generative Models Panel</strong></span></h1></div>
			</div> <div class="et_pb_module et_pb_post_title et_pb_post_title_0 et_pb_bg_layout_light et_pb_text_align_center">
				
				
				
				<div class="et_pb_title_container">
					
				</div>
				
			</div><div class="et_pb_module et_pb_image et_pb_image_0">
				
				
				<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" title="become_a_patron_button@2x" class="wp-image-585" /></span></a>
			</div><div class="et_pb_module et_pb_text et_pb_text_1 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><p style="text-align:left;"><span>This is the 5th in a series of panel discussions in collaboration with </span><a href="https://academy.neuromatch.io/home">Neuromatch Academy</a><span>, the online computational neuroscience summer school. This is the 2nd of 3 in the deep learning series. In this episode, the panelists discuss their experiences “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</span></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> <div class="et_pb_section et_pb_section_1 et_pb_section_parallax et_pb_with_background et_section_regular">
				
				<div class="et_parallax_bg_wrap"><div class="et_parallax_bg" style="background-image:url(&quot;https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png&quot;);"></div></div>
				
				
					<div class="et_pb_row et_pb_row_1 et_hover_enabled">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_1 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_2 et_pb_text_align_left et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><p></p>
<h3><span style="color:#000000;">Panelists</span></h3>
<p> </p>
<ul>
<li><a href="https://psych.la.psu.edu/directory/bpw10">Brad Wyble.</a>
<ul>
<li><a href="https://twitter.com/bradpwyble">@bradpwyble</a>.</li>
</ul>
</li>
<li><a href="https://www.kyunghyuncho.me/">Kyunghyun Cho.</a>
<ul>
<li><a href="https://twitter.com/kchonyc">@kchonyc</a>.</li>
</ul>
</li>
<li><a href="https://hhexiy.github.io/">He He</a>.
<ul>
<li><a href="https://twitter.com/hhexiy">@hhexiy.</a></li>
</ul>
</li>
<li><a href="https://www.stern.nyu.edu/faculty/bio/joao-sedoc">João Sedoc.</a>
<ul>
<li><a href="https://twitter.com/JoaoSedoc">@JoaoSedoc</a>.</li>
</ul>
</li>
</ul>
<p><span style="font-size:18px;">The other panels:</span> </p>
<ul>
<li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li>
<li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li>
<li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li>
<li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li>
<li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, a...</li></ul></div></div></div></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
				
				
				
				
					
				
				
				
				
				
				
				BI NMA 05:
NLP and Generative Models Panel
			 
				
				
				
				
					
				
				
			
				
				
				
			
				
				
				This is the 5th in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. This is the 2nd of 3 in the deep learning series. In this episode, the panelists discuss their experiences “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).
			 
			 
				
				
			 
				
				
			 
				
				
				
				
					
				
				
				
				
				
				
				
Panelists
 

Brad Wyble.

@bradpwyble.


Kyunghyun Cho.

@kchonyc.


He He.

@hhexiy.


João Sedoc.

@JoaoSedoc.



The other panels: 

First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.
Second panel, about linear systems, real neurons, and dynamic networks.
Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.
Fourth panel, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
Sixth panel, a...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI NMA 05: NLP and Generative Models Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<div class="et_pb_section et_pb_section_0 et_pb_with_background et_pb_section_parallax et_section_regular">
				
				
				
				
					<div class="et_pb_row et_pb_row_0">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_0 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_0 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><h4><span style="color:#0f1424;">BI NMA 05:</span></h4>
<h1><span style="color:#0f1424;"><strong>NLP and Generative Models Panel</strong></span></h1></div>
			</div> <div class="et_pb_module et_pb_post_title et_pb_post_title_0 et_pb_bg_layout_light et_pb_text_align_center">
				
				
				
				<div class="et_pb_title_container">
					
				</div>
				
			</div><div class="et_pb_module et_pb_image et_pb_image_0">
				
				
				<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" title="become_a_patron_button@2x" class="wp-image-585" /></span></a>
			</div><div class="et_pb_module et_pb_text et_pb_text_1 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><p style="text-align:left;"><span>This is the 5th in a series of panel discussions in collaboration with </span><a href="https://academy.neuromatch.io/home">Neuromatch Academy</a><span>, the online computational neuroscience summer school. This is the 2nd of 3 in the deep learning series. In this episode, the panelists discuss their experiences “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</span></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> <div class="et_pb_section et_pb_section_1 et_pb_section_parallax et_pb_with_background et_section_regular">
				
				<div class="et_parallax_bg_wrap"><div class="et_parallax_bg" style="background-image:url(&quot;https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png&quot;);"></div></div>
				
				
					<div class="et_pb_row et_pb_row_1 et_hover_enabled">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_1 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_2 et_pb_text_align_left et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><p></p>
<h3><span style="color:#000000;">Panelists</span></h3>
<p> </p>
<ul>
<li><a href="https://psych.la.psu.edu/directory/bpw10">Brad Wyble.</a>
<ul>
<li><a href="https://twitter.com/bradpwyble">@bradpwyble</a>.</li>
</ul>
</li>
<li><a href="https://www.kyunghyuncho.me/">Kyunghyun Cho.</a>
<ul>
<li><a href="https://twitter.com/kchonyc">@kchonyc</a>.</li>
</ul>
</li>
<li><a href="https://hhexiy.github.io/">He He</a>.
<ul>
<li><a href="https://twitter.com/hhexiy">@hhexiy.</a></li>
</ul>
</li>
<li><a href="https://www.stern.nyu.edu/faculty/bio/joao-sedoc">João Sedoc.</a>
<ul>
<li><a href="https://twitter.com/JoaoSedoc">@JoaoSedoc</a>.</li>
</ul>
</li>
</ul>
<p><span style="font-size:18px;">The other panels:</span> </p>
<ul>
<li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li>
<li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li>
<li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li>
<li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li>
<li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li>
</ul>
<p></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> 
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/nma-05.mp3" length="80782243"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
				
				
				
				
					
				
				
				
				
				
				
				BI NMA 05:
NLP and Generative Models Panel
			 
				
				
				
				
					
				
				
			
				
				
				
			
				
				
				This is the 5th in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. This is the 2nd of 3 in the deep learning series. In this episode, the panelists discuss their experiences “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).
			 
			 
				
				
			 
				
				
			 
				
				
				
				
					
				
				
				
				
				
				
				
Panelists
 

Brad Wyble.

@bradpwyble.


Kyunghyun Cho.

@kchonyc.


He He.

@hhexiy.


João Sedoc.

@JoaoSedoc.



The other panels: 

First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.
Second panel, about linear systems, real neurons, and dynamic networks.
Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.
Fourth panel, about some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
Sixth panel, a...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:23:50</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI NMA 04: Deep Learning Basics Panel]]>
                </title>
                <pubDate>Fri, 06 Aug 2021 07:37:19 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-nma-04-deep-learning-basics-panel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-nma-04-deep-learning-basics-panel</link>
                                <description>
                                            <![CDATA[<div class="et_pb_section et_pb_section_0 et_pb_with_background et_pb_section_parallax et_section_regular">
				
				
				
				
					<div class="et_pb_row et_pb_row_0">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_0 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_0 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><h4><span style="color:#0f1424;">BI NMA 04:</span></h4>
<h1><span style="color:#0f1424;"><strong>Deep Learning Basics Panel</strong></span></h1></div>
			</div> <div class="et_pb_module et_pb_post_title et_pb_post_title_0 et_pb_bg_layout_light et_pb_text_align_center">
				
				
				
				<div class="et_pb_title_container">
					
				</div>
				
			</div><div class="et_pb_module et_pb_image et_pb_image_0">
				
				
				<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" title="become_a_patron_button@2x" class="wp-image-585" /></span></a>
			</div><div class="et_pb_module et_pb_text et_pb_text_1 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><p style="text-align:left;"><span>This is the 4th in a series of panel discussions in collaboration with </span><a href="https://academy.neuromatch.io/home">Neuromatch Academy</a><span>, the online computational neuroscience summer school. This is the first of 3 in the deep learning series. In this episode, the panelists discuss their experiences with some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</span></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> <div class="et_pb_section et_pb_section_1 et_pb_section_parallax et_pb_with_background et_section_regular">
				
				<div class="et_parallax_bg_wrap"><div class="et_parallax_bg" style="background-image:url(&quot;https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png&quot;);"></div></div>
				
				
					<div class="et_pb_row et_pb_row_1 et_hover_enabled">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_1 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_2 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner">
<h3>Guests </h3>
<ul>
<li><a href="https://www.linkedin.com/in/amitakapoor/?originalSubdomain=in">Amita Kapoor</a></li>
<li><a href="https://www.cis.upenn.edu/~ungar/">Lyle Ungar</a>
<ul>
<li><a href="https://twitter.com/LyleUngar">@LyleUngar</a></li>
</ul>
</li>
<li><a href="https://ganguli-gang.stanford.edu/surya.html">Surya Ganguli</a>
<ul>
<li><a href="https://twitter.com/SuryaGanguli">@SuryaGanguli</a></li>
</ul>
</li>
</ul>
<h4><span style="font-size:18px;">The other panels:</span> </h4>
<ul>
<li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li>
<li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li>
<li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li>
<li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li>
<li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li>
</ul>
<p> </p>
<p></p></div>
			</div> <div class="et_pb_module et_pb_text et_pb_tex...&lt;/div&gt;&lt;/body&gt;&lt;/html&gt;"></div></div></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
				
				
				
				
					
				
				
				
				
				
				
				BI NMA 04:
Deep Learning Basics Panel
			 
				
				
				
				
					
				
				
			
				
				
				
			
				
				
				This is the 4th in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. This is the first of 3 in the deep learning series. In this episode, the panelists discuss their experiences with some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
			 
			 
				
				
			 
				
				
			 
				
				
				
				
					
				
				
				
				
				
				
				
Guests 

Amita Kapoor
Lyle Ungar

@LyleUngar


Surya Ganguli

@SuryaGanguli



The other panels: 

First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.
Second panel, about linear systems, real neurons, and dynamic networks.
Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.
Fifth panel, about “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).
Sixth panel, about advanced topics in deep learning: unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.

 

			 ]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI NMA 04: Deep Learning Basics Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<div class="et_pb_section et_pb_section_0 et_pb_with_background et_pb_section_parallax et_section_regular">
				
				
				
				
					<div class="et_pb_row et_pb_row_0">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_0 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_0 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><h4><span style="color:#0f1424;">BI NMA 04:</span></h4>
<h1><span style="color:#0f1424;"><strong>Deep Learning Basics Panel</strong></span></h1></div>
			</div> <div class="et_pb_module et_pb_post_title et_pb_post_title_0 et_pb_bg_layout_light et_pb_text_align_center">
				
				
				
				<div class="et_pb_title_container">
					
				</div>
				
			</div><div class="et_pb_module et_pb_image et_pb_image_0">
				
				
				<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><span class="et_pb_image_wrap"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" title="become_a_patron_button@2x" class="wp-image-585" /></span></a>
			</div><div class="et_pb_module et_pb_text et_pb_text_1 et_pb_text_align_center et_pb_bg_layout_dark">
				
				
				<div class="et_pb_text_inner"><p style="text-align:left;"><span>This is the 4th in a series of panel discussions in collaboration with </span><a href="https://academy.neuromatch.io/home">Neuromatch Academy</a><span>, the online computational neuroscience summer school. This is the first of 3 in the deep learning series. In this episode, the panelists discuss their experiences with some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</span></p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> <div class="et_pb_section et_pb_section_1 et_pb_section_parallax et_pb_with_background et_section_regular">
				
				<div class="et_parallax_bg_wrap"><div class="et_parallax_bg" style="background-image:url(&quot;https://braininspired.co/wp-content/uploads/2021/08/podcast-06.png&quot;);"></div></div>
				
				
					<div class="et_pb_row et_pb_row_1 et_hover_enabled">
				<div class="et_pb_column et_pb_column_4_4 et_pb_column_1 et_pb_css_mix_blend_mode_passthrough et-last-child">
				
				
				<div class="et_pb_module et_pb_text et_pb_text_2 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner">
<h3>Guests </h3>
<ul>
<li><a href="https://www.linkedin.com/in/amitakapoor/?originalSubdomain=in">Amita Kapoor</a></li>
<li><a href="https://www.cis.upenn.edu/~ungar/">Lyle Ungar</a>
<ul>
<li><a href="https://twitter.com/LyleUngar">@LyleUngar</a></li>
</ul>
</li>
<li><a href="https://ganguli-gang.stanford.edu/surya.html">Surya Ganguli</a>
<ul>
<li><a href="https://twitter.com/SuryaGanguli">@SuryaGanguli</a></li>
</ul>
</li>
</ul>
<h4><span style="font-size:18px;">The other panels:</span> </h4>
<ul>
<li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li>
<li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li>
<li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li>
<li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li>
<li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li>
</ul>
<p> </p>
<p></p></div>
			</div> <div class="et_pb_module et_pb_text et_pb_text_3 et_pb_text_align_left et_pb_bg_layout_light">
				
				
				<div class="et_pb_text_inner"><span style="color:#0f1424;">Timestamps:</span>
<p> </p></div>
			</div> 
			</div> 
				
				
			</div> 
				
				
			</div> 
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/nma-04.mp3" length="57282479"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
				
				
				
				
					
				
				
				
				
				
				
				BI NMA 04:
Deep Learning Basics Panel
			 
				
				
				
				
					
				
				
			
				
				
				
			
				
				
				This is the 4th in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. This is the first of 3 in the deep learning series. In this episode, the panelists discuss their experiences with some basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
			 
			 
				
				
			 
				
				
			 
				
				
				
				
					
				
				
				
				
				
				
				
Guests 

Amita Kapoor
Lyle Ungar

@LyleUngar


Surya Ganguli

@SuryaGanguli



The other panels: 

First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.
Second panel, about linear systems, real neurons, and dynamic networks.
Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.
Fifth panel, about “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).
Sixth panel, about advanced topics in deep learning: unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.

 

			 ]]>
                </itunes:summary>
                                                                            <itunes:duration>00:59:21</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 111 Kevin Mitchell and Erik Hoel: Agency, Emergence, Consciousness]]>
                </title>
                <pubDate>Tue, 27 Jul 2021 21:08:10 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-111-kevin-mitchell-and-erik-hoel-agency-emergence-consciousness</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-111-kevin-mitchell-and-erik-hoel-agency-emergence-consciousness</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/111-art-01.jpg" alt="" class="wp-image-1261" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="1024" height="734" src="https://braininspired.co/wp-content/uploads/2021/07/erikHead-2-1024x734.jpg" alt="" class="wp-image-1267" /></li><li class="blocks-gallery-item"><img width="618" height="770" src="https://braininspired.co/wp-content/uploads/2021/07/kevinHead-1.jpeg" alt="" class="wp-image-1268" />Kevin Mitchell Pic Paul Sharp/SHARPPIX</li></ul>



<p>Erik, Kevin, and I discuss… well a lot of things. </p>



<p>Erik’s recent novel The Revelations is a story about a group of neuroscientists trying to develop a good theory of consciousness (with a murder mystery plot). </p>



<p>Kevin’s book Innate – How the Wiring of Our Brains Shapes Who We Are describes the messy process of getting from DNA, traversing epigenetics and development, to our personalities. </p>



<p>We talk about both books, then dive deeper into topics like whether brains evolved for moving our bodies vs. consciousness, how information theory is lending insights to emergent phenomena, and the role of agency with respect to what counts as intelligence.</p>



<p></p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/07/innateThumb-150x150.jpg" alt="" class="wp-image-1262" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/07/revelationsThumb-150x150.jpg" alt="" class="wp-image-1263" /></li></ul>



<ul><li><a href="https://www.kjmitchell.com/">Kevin’s website</a>.</li><li><a href="https://www.erikphoel.com/">Erik’s website</a>.</li><li>Twitter: <a href="https://twitter.com/WiringTheBrain?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">@WiringtheBrain</a> (Kevin); <a href="https://twitter.com/erikphoel">@erikphoel</a> (Erik)</li><li>Books:<ul><li><a href="https://www.amazon.com/gp/product/B07CSHZRGN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B07CSHZRGN&amp;linkId=31d60129dc19af78bb1b5851e69f1db8">INNATE – How the Wiring of Our Brains Shapes Who We Are</a></li><li><a href="https://www.amazon.com/gp/product/1419750224/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1419750224&amp;linkId=d9e2348c2893510421c9f2afc94442e7">The Revelations</a></li></ul></li><li>Papers<ul><li>Erik<ul><li><a href="https://arxiv.org/abs/2004.03541">Falsification and consciousness</a>.</li><li><a href="https://www.hindawi.com/journals/complexity/2020/8932526/">The emergence of informative higher scales in complex networks</a>.</li><li><a href="https://arxiv.org/abs/2104.13368">Emergence as the conversion of information: A unifying theory</a>.</li></ul></li></ul></li></ul>



<p>Timestamps</p>



<p>0:00 – Intro<br />3:28 – The Revelations – Erik’s novel<br />15:15 – Innate – Kevin’s book<br />22:56 – Cycle of progress<br />29:05 – Brains for movement or consciousness?<br />46:46 – Freud’s influence<br />59:18 – Theories of consciousness<br />1:02:02 – Meaning and emergence<br />1:05:50 – Reduction in neuroscience<br />1:23:03 – Micro and macro – emergence<br />1:29:35 – Agency and intelligence<br /></p>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Eric, a fresh congratulations to you and Kevin, a stale, I suppose if two years to three years to innate was written in 2018, right. Or published, I don’t know when it was written and Eric a fresh congratulations to you. Although I know that the revelations, your book has been a long...</p></div></div>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Kevin Mitchell Pic Paul Sharp/SHARPPIX



Erik, Kevin, and I discuss… well a lot of things. 



Erik’s recent novel The Revelations is a story about a group of neuroscientists trying to develop a good theory of consciousness (with a murder mystery plot). 



Kevin’s book Innate – How the Wiring of Our Brains Shapes Who We Are describes the messy process of getting from DNA, traversing epigenetics and development, to our personalities. 



We talk about both books, then dive deeper into topics like whether brains evolved for moving our bodies vs. consciousness, how information theory is lending insights to emergent phenomena, and the role of agency with respect to what counts as intelligence.











Kevin’s website.Erik’s website.Twitter: @WiringtheBrain (Kevin); @erikphoel (Erik)Books:INNATE – How the Wiring of Our Brains Shapes Who We AreThe RevelationsPapersErikFalsification and consciousness.The emergence of informative higher scales in complex networks.Emergence as the conversion of information: A unifying theory.



Timestamps



0:00 – Intro3:28 – The Revelations – Erik’s novel15:15 – Innate – Kevin’s book22:56 – Cycle of progress29:05 – Brains for movement or consciousness?46:46 – Freud’s influence59:18 – Theories of consciousness1:02:02 – Meaning and emergence1:05:50 – Reduction in neuroscience1:23:03 – Micro and macro – emergence1:29:35 – Agency and intelligence


Transcript

Eric, a fresh congratulations to you and Kevin, a stale, I suppose if two years to three years to innate was written in 2018, right. Or published, I don’t know when it was written and Eric a fresh congratulations to you. Although I know that the revelations, your book has been a long...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 111 Kevin Mitchell and Erik Hoel: Agency, Emergence, Consciousness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/111-art-01.jpg" alt="" class="wp-image-1261" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="1024" height="734" src="https://braininspired.co/wp-content/uploads/2021/07/erikHead-2-1024x734.jpg" alt="" class="wp-image-1267" /></li><li class="blocks-gallery-item"><img width="618" height="770" src="https://braininspired.co/wp-content/uploads/2021/07/kevinHead-1.jpeg" alt="" class="wp-image-1268" />Kevin Mitchell Pic Paul Sharp/SHARPPIX</li></ul>



<p>Erik, Kevin, and I discuss… well a lot of things. </p>



<p>Erik’s recent novel The Revelations is a story about a group of neuroscientists trying to develop a good theory of consciousness (with a murder mystery plot). </p>



<p>Kevin’s book Innate – How the Wiring of Our Brains Shapes Who We Are describes the messy process of getting from DNA, traversing epigenetics and development, to our personalities. </p>



<p>We talk about both books, then dive deeper into topics like whether brains evolved for moving our bodies vs. consciousness, how information theory is lending insights to emergent phenomena, and the role of agency with respect to what counts as intelligence.</p>



<p></p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/07/innateThumb-150x150.jpg" alt="" class="wp-image-1262" /></li><li class="blocks-gallery-item"><img width="150" height="150" src="https://braininspired.co/wp-content/uploads/2021/07/revelationsThumb-150x150.jpg" alt="" class="wp-image-1263" /></li></ul>



<ul><li><a href="https://www.kjmitchell.com/">Kevin’s website</a>.</li><li><a href="https://www.erikphoel.com/">Erik’s website</a>.</li><li>Twitter: <a href="https://twitter.com/WiringTheBrain?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">@WiringtheBrain</a> (Kevin); <a href="https://twitter.com/erikphoel">@erikphoel</a> (Erik)</li><li>Books:<ul><li><a href="https://www.amazon.com/gp/product/B07CSHZRGN/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B07CSHZRGN&amp;linkId=31d60129dc19af78bb1b5851e69f1db8">INNATE – How the Wiring of Our Brains Shapes Who We Are</a></li><li><a href="https://www.amazon.com/gp/product/1419750224/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1419750224&amp;linkId=d9e2348c2893510421c9f2afc94442e7">The Revelations</a></li></ul></li><li>Papers<ul><li>Erik<ul><li><a href="https://arxiv.org/abs/2004.03541">Falsification and consciousness</a>.</li><li><a href="https://www.hindawi.com/journals/complexity/2020/8932526/">The emergence of informative higher scales in complex networks</a>.</li><li><a href="https://arxiv.org/abs/2104.13368">Emergence as the conversion of information: A unifying theory</a>.</li></ul></li></ul></li></ul>



<p>Timestamps</p>



<p>0:00 – Intro<br />3:28 – The Revelations – Erik’s novel<br />15:15 – Innate – Kevin’s book<br />22:56 – Cycle of progress<br />29:05 – Brains for movement or consciousness?<br />46:46 – Freud’s influence<br />59:18 – Theories of consciousness<br />1:02:02 – Meaning and emergence<br />1:05:50 – Reduction in neuroscience<br />1:23:03 – Micro and macro – emergence<br />1:29:35 – Agency and intelligence<br /></p>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Eric, a fresh congratulations to you and Kevin, a stale, I suppose if two years to three years to innate was written in 2018, right. Or published, I don’t know when it was written and Eric a fresh congratulations to you. Although I know that the revelations, your book has been a long time coming and has, uh, you know, this is your first novel, correct.  </p>



<p>Erik H    00:03:50    It’s my first novel and yeah, about 10 years is probably the, it’s probably the lower end of the estimate. Although of course the bulk of the writing was done in graduate school,  </p>



<p>Paul M    00:04:04    Which is crazy. This is not 10 years.  </p>



<p>Paul M    00:04:08    Well, I know people, um, and there’s nothing wrong with that. Nothing wrong with 10 years in graduate school, everybody. Um, but so this is the, this is the first on, on the podcast, I believe where, uh, we’re going to talk a little bit about a fiction book. And so we’ll, we’ll spend some time talking about the revelations. I want to talk about innate a little bit, Kevin, and then we’ll come together and you guys will really butt heads about some of your, uh, some fundamental core concepts and ideas that you hold dearly. Um, so, so  </p>



<p>Erik H    00:04:36    I’ve been listening to more of Kevin’s like talks and so on. And my suspicion is actually that we agree on basically everything. So, uh, it may, it may be more of a, kind of more of a head shake. Exactly.  </p>



<p>Paul M    00:04:53    So this, this is, to me, this is the conversations I aim for. I actually find debates mostly utterly on useful, but, but anyway, uh, we don’t need to get into my thoughts on, on debates. So Eric, among other things in, in the revelations, um, it’s just really impressive the, the scope of the philosophy and the science that you cover throughout the novel. And it’s a, it’s a long novel. So you do have some time and space to do that, but, but it’s also just filled with these really vivid descriptions of the science, um, and this, so this is maybe I should back up and say that the novel revolves around characters, uh, pursuing a theory of consciousness and then things happen in the background. Um, like there’s a murder mystery involved as well. But like I said, there’s lots of vivid descriptions of the science, but also the characters that what they’re going through emotionally and their thoughts and the scenes and, and the writing is, is generally, uh, just fantastic. And so congratulations on writing. Thank you  </p>



<p>Erik H    00:05:55    So much. That’s awesome to hear on,  </p>



<p>Paul M    00:05:57    On the other hand, I’ll say I felt almost like an old curmudgeon, partly because I am these days, but I kept wondering why these, uh, these immature, you know, children, uh, and the novel that of the characters were so like pretentious, you know, in self-absorbed in their quest to, uh, to understand. And I found myself grateful that I didn’t know many people who, who, uh, held that held their own, um, held their own ideas in such high regard throughout my own, uh, academic work. So, um, so I am,  </p>



<p>Erik H    00:06:32    I think that that’s a very viable take, right? Um,  </p>



<p>Paul M    00:06:37    From a combat curmudgeon, especially the youth energy.  </p>



<p>Erik H    00:06:42    Yeah. So a lot of the novel is about, um, you know, when there are dominant kind of modes of thought, particularly within academia, oftentimes there are people who really, you know, chafe chafe against that. And in some sense, it’s kind of necessary. It’s very similar to the fact that, you know, no one who is a billionaire didn’t think when they were a kid, I’m going to be a billionaire, but there are a lot of people who you don’t hear about who thought that the same thing, and they’re not billionaires. Right? And so every billionaire has this, you know, self-reinforcing narrative of like, well, I just believed it from when I was very young and look, it all worked out and the same is probably true for scientific theories, particularly sort of grand sweeping scientific theories. You know, one of the, the main characters or the main character, Kirk is looking for a scientific theory of consciousness, which he thinks there needs to be a scientific theory of even that is in itself debatable.  </p>



<p>Erik H    00:07:49    Some people disagree, but, but he thinks this and he, you know, is kind of mano maniacally throwing himself against the problem. And, you know, I want it to draw similarities between the sort of anxiety that drives. I think probably a lot of those discoveries and things like, you know, creative endeavors, like writing a novel or becoming a painter, or, you know, any of these other things that we think of as involving, you know, almost an artistic, romantic level of obsession. And I do think that probably, you know, at the upper levels of science, there is a lot more ego and humanness going on, then it would be apparent from the outside. And my goal wasn’t to say that the character Kirk, who is very strident in his opinions so much so that he can sometimes be off-putting, uh, is correct about everything, but I, I’m also not of the opinion that, that he is kind of irredeemable or that this is actually a very bad thing. I think in some cases it can be a very good thing to be like that.  </p>



<p>Paul M    00:09:03    Yeah. I mean, the other striking thing, reading the novel, and of course I’m coming at it from a neuro neuroscientist perspective, and I also knew, uh, your background or, or at least some, some there in just looking through your publications. And so there’s a lot of overlap and I found myself, so I actually thought, um, I learned through another interview that you gave that the, that Kirk K I E R K is Kirk as in Kierkegaard. But I thought early on, I thought, oh, that’s an anagram of Eric with an extra letter. And because there are these parallels, a lot of overlap between the nonfiction of your life and the fiction of what happens in the story. Um, and so I’m kind of wondering, because there are such great descriptions. I think I even, uh, tweeted, uh, early on when I was reading the book, it’s like, you, you like captured the world of non-human primate research. Like so, well, you, you couldn’t have done that, had you not experienced those things. And, and, you know, a lot of like the social aspects and stuff. So it makes me wonder where their fiction ends in the, or where the nonfiction ends and where the fiction begins in the novel.  </p>



<p>Erik H    00:10:09    Well, it’s, it’s a good question. And I’m not even sure that as an author, I have a definitive answer that can partition out that boundary, you know, appropriately. I think all authors draw a lot from their own experience and lives. Mine just kind of happens to be in this, um, you know, world of like high performance science, where there is a lot of, you know, stress. And there’s also a lot of really interesting things going on. I mean, you, all those descriptions, you know, in the novel, they, these young scientists are coming to New York to study consciousness at New York university. They’re in a building, which is called the center for neuroscience or CNS, which is very maze, like, so they’re always getting lost in the CNS. And, you know, there are primate labs in the building and even the dynamics between, you know, young people meeting each other for the first time can often be primate ask.  </p>



<p>Erik H    00:11:08    So there’s kind of a mirror between the monkeys, you know, and the people and how they interact. And, you know, that’s all based off of, there is a real primate research facility at the CNS at New York university. Um, and it is a realistic depiction of the sort of animal research. And I didn’t want to spare, you know, some people have, have said some, some reviewers have said this, this is a really dark, well, very interesting, very dark descriptions of what’s actually going on. And I didn’t want to spare that because I do think that science alone is thematically interesting enough and dark enough to, uh, to get the sort of literary treatment. I mean, it is an absolutely insane thing to drill open a monkey’s brain lower in a gigantic microscope, into it, having, you know, possibly reprogram the cells in the brain to respond to light and then flash light, you know, over the dark cortex and cause various, you know, uh, effects.  </p>



<p>Erik H    00:12:19    And that is a, um, that kind of mechanism and work is so, uh, thematically dense to me that I felt I could only capture it if I drew from real life and real experiences. You know, I, I do think that there’s, there’s no way to cleanly separate it, but I would also say just to quickly beg off like the, the, the, the quick Kirk to Eric comparison that, you know, a lot of other characters also have a lot of me in them. So like, Carmen, for example, yes. Kirk and I went to the same graduate school. Uh, so one might say, well, isn’t it kind of obvious then, but like Carmen went to Columbia university and I worked at Columbia for years. So, you know, th th th her experiences at Columbia are, you know, me observing things that go on at Columbia, right. So, you know, a lot of, you know, a part of me ends up in all the characters and I, and I, and I, I wouldn’t say that there’s some sort of like immensely director obvious mapping.  </p>



<p>Paul M    00:13:22    I want to bring Kevin in a little bit, because I have a bunch of questions about just the process of writing and, and what jumped into my head while you were talking about, I didn’t think about this before, but I’m curious Eric about writing the, how, how the process of writing, the more non-fiction like the descriptions of monkey facilities and things of that nature that you’re, uh, ha have a lot of experience with it. Was that faster? Was that harder or easier or faster or slower than the parts that you were more you had to generate in your head?  </p>



<p>Erik H    00:13:58    Yeah, I would say probably somewhat similar. I mean, you know, in terms of speed, the speed aspect, I mean, I’m sure Kevin can speak to this in the sense that I do think that nonfiction feels a very particular way. And when you’re writing fiction, you know, the, the goal is, is almost to somehow like transcend the subject in a particular way. Like in the end, the literary eye has to have a final say when you’re talking about fiction, like art is like the laughter, like after everything is done, right, like the universe gets run out. It’s just after heat death, and it’s cold and it’s dark, but art kind of gets this last, like chuckle, like at the end. And I think that that’s true, like in general, like when you’re trying to describe anything that art kind of has to make, maintain superiority, but it’s very dangerous to go into nonfiction thinking about that sort of like romantic, poetic angle. Um, and you kind of have to go in very dry and very, um, very skeptical and like non-romantic at all. So really it’s much more about turning on and off, like the more romantic parts of my brain when I write either, or,  </p>



<p>Paul M    00:15:15    But, but you still have to go in, you know, trying to write a readable book in non-fiction. And I realized too, you know, there’s different types of non-fiction because writing a book about the history of dog shows, for instance, uh, you’re not really writing about the unknown and much of the nonfiction that you guys both write, uh, in Kevin and in innate, for instance, is all about, it’s partly about what is known, but it’s vastly about what is unknown and how we might be able to start knowing what is unknown and making connections. And I mean, so, so innate, uh, is a lot of people will have already read it, I’m sure, but it’s kind of about the connection between genetics and development and him psychology. So from like the really low level, uh, code DNA code to our psychological traits. Um, and so in, in a sense that’s hard to pull off writing a readable non-fiction. Yeah,  </p>



<p>Kevin M    00:16:10    Yeah. I mean, and, and, and the goal, I guess, I mean, I think of, uh, you know, fiction writing as, um, as very evocative, you know, and really making, making the words, dance and making, making images of, uh, you know, appear in people’s minds in a, in a, in a sort of, uh, a dialogue where the, the author is not necessarily, um, you know, directing everything, but, but more sort of inspiring a response in the reader, whereas, um, certainly what I was doing. Uh, it’s, it’s not really evocative in that sense. It’s really a, I guess not making the words dance, but maybe making the March, uh, you know, trying to be clear enough that there’s a sort of a, it’s almost a kind of, I feel like it’s a kind of mind control almost, you know, you really have to have this sort of constant empathy for the reader to say, okay, well, what did they know?  </p>



<p>Kevin M    00:17:00    What are they thinking? If I say this, how are they going to take it? Nevermind what the words I’ve said, how are they going to take it? And then do I need to explain something else? Um, do I need to give them, you know, a, a metaphor, something familiar that they can, um, take as a, a scaffold for an idea that, you know, that I’m trying to build up here. And, um, so yeah, I mean, I find actually writing the book, writing that kind of non-fiction for nonspecialists really, um, fun, uh, but it’s a different challenge to writing, you know, academic papers for very specialist audiences, where you can rely on the crutch of shared knowledge and shared perspective and shared assumptions. Um, and sometimes the goal in writing the, the interdisciplinary stuff is, is to question those assumptions from any, from any individual and, and, um, I guess try to recast, uh, things so that the relations between, like you said, genes and brains and minds, um, can be seen in it in a particular perspective. And the perspective I was taking in Nate is a developmental one. The argument was that the only way to make sense of that relationship is through it, through the prism of development  </p>



<p>Paul M    00:18:14    And the take home. First of all, two things, one, the take home is that everything is messy, yes. And development. And so that’s one of the take home take homes in the, in the book is just, you have genes, you have psychological traits, they’re not a one-to-one mapping. Everything is super, super messy. And we have forgotten about development as a process that, um, gives rise to us just tons of variation in itself.  </p>



<p>Kevin M    00:18:37    Yeah, exactly. The idea that there’s some genetic variation in our psychological traits is, is nothing new. Um, but what I think was the message I was trying to get across is that that variation is realized through the processes of development. And like you said, there’s not, there’s no one-to-one mapping between, you know, a gene for this and a gene for that cognitive traits. It’s really complex, polygenic, um, and very indirect and, and sort of emergent relationships between those. And yes, the processes of development themselves are noisy in molecular terms. So you can start with the exact same genome and you won’t get the exact same outcome every time you run that program, basically. So that’s why, you know, identical twins don’t look exactly the same and their brains are not exactly the same as each other and their minds are certainly not exactly the same. So, yeah, I think development is that, um, third source of variation in our individual natures, that’s often overlooked and, uh, and in a sense, a really important one because it, it defies any predictability. It, it will never be predictable. It’s just inherently stochastic. Um,  </p>



<p>Paul M    00:19:49    That’s a really down, it’s a real downer, you know, that well, in a sense,  </p>



<p>Kevin M    00:19:53    Uh, you know, it puts a limit. It’s interesting, cause it puts a limit on, um, on genetic predictability, for example. So the, the, uh, claims that, you know, you’ll be able to sequence someone’s genome or sequence the genome of embryos and, and, and really predict with some accuracy what their psychological traits is going to be like, you just won’t ever. And it’s not just that we don’t know  </p>



<p>Paul M    00:20:18    Designer babies.  </p>



<p>Kevin M    00:20:20    Yeah. Designer yes. Designer babies. Yeah. Sorry. It’s not just that we don’t know enough now it’s that we never will. There’s a firm limit in principle on how much we could ever predict from that.  </p>



<p>Erik H    00:20:30    Kevin, do you, do you find, um, that it’s harder to attract attention arguing like a non-fiction perspective wherein you’re arguing from, uh, like a more skeptical perspective? Like, like what you just said right. Is a lot easier. It’s a lot easier to sell the opposite.  </p>



<p>Kevin M    00:20:51    Yeah. I, I do think, um, yeah, I do think that a more sensationalist point of view, uh, probably would sell more books than a, than a more the, the, the nuanced kind of picture that I’m trying to show, which is complex and it is messy. And at some point you just have to, you just have to accept that, but it doesn’t, um, you know, there aren’t that strong headlines, like, you know, a strong headline is DNA is your future. That’s a great, that’s a great newspaper headline that people are gonna want to read that. Whereas, you know, the headline, well, it’s complicated that doesn’t, uh, doesn’t necessarily jump off the page in the same way.  </p>



<p>Erik H    00:21:32    Yeah. Right. Yeah. It’s, it’s something I’ve been struggling with, you know, myself. I mean, I think that the defining feature of like good literature is it’s agnosticism, you know, you’re, you’re kind of agnostic about, about almost everything when you’re writing, like, you know, whether or not this person is a good or bad person, you have to be agnostic about that. Um, and, and, and whether or not, you know, in, in the revelations, you know, it’s, it’s very much not that one particular hypothesis about how consciousness works is advanced. Right. All the characters have a different take on that. And then when I go to write non-fiction, it’s very much not agnostic. Right. Like you have to put that aside and take some sort of strong, you know, you’re, you’re either a hardcore atheist or you’re like a fundamentalist Christian, or what have you. Right. And that, then you kind of argue firmly for this belief. And the agnosticism kind of is, is left aside. I don’t know if you’ve ever had,  </p>



<p>Kevin M    00:22:26    Yeah. I mean, in the sense, I guess what I was doing was, was taking a strong position against extreme positions. So I was arguing against genetic determinism, but also against, uh, naive empiricism where everything is. So, so in a sense, there’s an argument, you know, Innate is, uh, argumentative in that sense, it’s advancing a set of ideas. It’s just that they’re ideas in the middle. They’re not, they’re not ideas at one or other extreme.  </p>



<p>Paul M    00:22:56    So, so for instance, Eric, I know that you consider yourself a, a writer first and, uh, an author first, perhaps, and a scientist. I don’t want to say second, but, um, because you’re, you’re, you know, fully engaged in science also, but I know from an early age, you have always bought, wanted to be a writer. So in some sense, you consider yourself a writer first. Um, the reason why I’m bringing this up is because I have this weird, what I really want to know is the right, uh, cyclical, um, pattern for me and for, for anyone to like work on something and then walk away, but not just walk away, work on something else. Right. And then coming. And then when is the right time to come back to the thing that you were originally working on and you have three or four different things you’re working on, what is the right pattern?  </p>



<p>Paul M    00:23:42    Because when you do that, when you go work on something else, whether, you know, sometimes it’s just taking a walk, but often it’s also diving deep into another subject. And that’s when the real insight comes relative to your first for the thing that you’re mainly working on. And there must be some, uh, some right answer for people, you know, in particular for each individual, for how long and what debts to work on something before moving on to another thing and coming back and revisiting and cycling through. And I’m wondering what your experience is. If you found the right rhythm, um, with, with your writing and Kevin, this applies to you too, because writing nonfiction is a different beast and how much that informs your science and vice-versa  </p>



<p>Erik H    00:24:24    Yeah. The description of the cyclical nature of that is key. You know, the reason I’ve described myself as a, as a writer first is one that, of course I want people to take the book seriously. I mean, I think that there’s the standard take is something like, oh, look at this a time, you know, uh, but you know, who, who somehow found the time and like was incredibly lucky to get it published. And it’s like, no, you know, if you look historically, I grew up in an independent bookstore that my mother owned and I worked there as a teenager. And so I always loved books and was surrounded by books and wanted to be an author first and foremost. But, um, all my favorite writers wrote about, um, really drew from their background and experience, you know, um, somebody like Herman Melville going off and writing Typee after he gets stranded, amid the cannibals, you know, or Joan Didion finding the counter-culture right.  </p>



<p>Erik H    00:25:18    And it’s like, well, I th I didn’t know where the counterculture was, you know, when I was 17, but I did find the non-fiction section of the store. And I realized that I could actually be pretty good at this. Um, and that the world of science was itself incredibly interesting and rich, and really had been under studied from like a literary slash humanities perspective. You know, you can go back and read like C.P. Snow’s The Two Cultures, um, you know, a famous lecture where he’s just bemoaning the lack of communication between the humanities and the sciences. And I think that that’s still true today. And, you know, I did, you know, have this idea for the book you, you know, even in college. Uh, and I ended up pursuing my PhD in neuroscience, at least partly in order to kind of, I knew that I would need to have that background, as you said, so much of the book, you know, draws from reality and has that ring of hopefully has that ring of truth to it.  </p>



<p>Erik H    00:26:17    And that’s from, you know, getting literally going and getting my PhD with all that said, just to answer your question about how to balance those things. Um, I think that it’s, it, it, it is very, very helpful to work in cycles. I don’t think that there’s any way that someone can completely appropriately balance two very, very different fields. And I’m sure this even applies within Kevin’s own scientific research, which draws from various interdisciplinary aspects, because just doing deep dives into something, and then coming up is always going to be better than doing something shallow. So there have been periods of time, you know, even like a year where I just have not written a word of fiction and then I’ll start writing again. And, you know, it’ll all just kind of pour out. So I’m, I’m very, very much in the cyclical nature. I wish I could systematize it and tell people some sort of good, you know, system to do it. Uh, but I think it it’s, it’s, it’s, it’s, it’s almost, uh, it’s almost always by the seat of my pants.  </p>



<p>Kevin M    00:27:23    Yeah. Yeah. I mean, I, I would, um, yeah, say something similar. I think the, um, the cyclical nature for me is somehow enforced by reaching a point where I’m just not making progress, you know, where I’m just trying to write something and I just can’t, it’s just not flowing and I just need to go away and do something else. Um, it’s often when I’ve written myself into a corner and actually I need to pull, I need to pull back from that. And, um, yeah, like, you know, going for a walk or something like that, or even just not looking at it for a week or so, and, and coming back fresh. Um, but also I think before I start writing, there’s a long, long period where I’m just kind of ruminating on stuff and I’m reading loads and loads of different things, um, kind of thinking about them.  </p>



<p>Kevin M    00:28:11    I may be making notes and stuff. I might not even ever go back to the notes, but there’s sort of an active process there of saying, yeah, here’s an idea that I can use. And, and, um, I mean, the ones I really get excited about, I guess, are aware, I’m trying to explain one thing, and there’s an idea that comes from another field, but actually it’s, it’s very much the same process. And so it’s, uh, uh, you know, some, some kind of insight that I can use there that is a kind of a cross-fertilization of, of ideas. Um, that’s, for me, that’s the really intellectually satisfying stimulating part of doing the research for these kinds of books that are trying to make a synthesis of, of different fields. But then at some point I get to the stage where I’m like, yeah, okay. I think, I think I can start writing. I’m just going to start. Um, and then, um, yeah, I just carry on until I hit a wall again,  </p>



<p>Paul M    00:29:06    Why are you ready for the, uh, the big showdown, the big debate,  </p>



<p>Kevin M    00:29:10    Should we think we’re going to vigorously agree?  </p>



<p>Paul M    00:29:14    Fine. I’ll disagree. What is it? So, uh, you guys, apparently at least on the surface differ in the answer to the following questions, to the following question, what are brains for, and in this corner, we have Kevin who says brains are for action. And in that corner, we have Eric who says brains are for consciousness. And I don’t, I don’t know this was just, it happens. This was a, for some reason, I, to be honest, I don’t know why I ever go on Twitter, but I saw that there is an exchange between you two on Twitter and it was this exchange. So I don’t know what the backstory is and how tongue in cheek it is.  </p>



<p>Kevin M    00:29:55    I can’t remember. I can’t remember either. I do. I do remember I’m disagreeing about Marvel movies, but the, um, yeah, the consciousness versus action thing. I don’t, I don’t think that’s, I don’t think it’s a real versus there. I think it’s a, well, for me, I think if you look in an evolutionary sense and you say what are brains for, or more basically what are nervous systems for, I think they’re for homeostasis actually, they’re for keeping, helping the organism keep itself going right to keep its, its all of its systems in the optimal operating range. And um, one way the organism can do that. Of course it’s sort of regulating its internal physiology. So reconfiguring its biochemistry say, um, but another way it can do it is um, if the, you know, if it can’t locally adapt to its environment is to move. So it, but, but the goal of it moving, uh, is to restore itself to homeostasis.  </p>



<p>Kevin M    00:30:55    So for example, it might be hungry. That’s a signal that its internal physiology is, is a skew out of the range that wants to be in. Um, and then it can move in order to find food. So a goal-directed reinforced, um, kind of behavior. And of course, you know, simple organisms, single single cell creatures like bacteria and amoeba do that. But nervous systems in, in multicellular creatures are really good for, um, you know, coordinating action across the, across the whole organism. And um, and of course they have built up this, this, uh, power of information processing. So rather than just having a single trigger that, you know, you go towards you just doing chemotaxis, you can assess multiple things in the environment and you can integrate that information and build up a picture of what’s out in the world and even predict things about what’s in the world and take action that anticipates things and so on.  </p>



<p>Kevin M    00:31:53    So you can get this sort of elaboration over time that nervous systems allow, um, especially when they have a kind of a hierarchy that gets processes more and more abstract things about the environment. And the ultimately I would say that ends with consciousness where what it’s processing is the goals and beliefs and desires that are being represented or enacted within another part of the nervous system. So for me, consciousness is the ultimate means of in a sense controlling action. And it’s still in favor of the, or in the service of this ultimate goal of keeping the organisms persistent.  </p>



<p>Erik H    00:32:34    Yeah, I think, I think just to follow up on this and maybe, maybe I’ll, I’ll try to say something radical. So Kevin and I actually have something to, to, to, to disagree about it, you know, to, to be like as, as, as radical as possible, um, you know, a big concern of mine and this is of course merit and you know, some of the, the, the characters in the revelations is this idea of a scientific theory of consciousness. And the way that I would maybe phrase this is that right now neuroscience has very good correlational, uh, measures. So we can do pretty good job in neuroimaging of correlating someone’s experience to some particular brain state, but we have one would say no lawful, uh, measures. So, so we have no lawful way to, or relations. We have no lawful way to relate, uh, you know, some ongoing neural activity to a particular conscious experience like, you know, classically the redness of red or the feeling of pain or so on.  </p>



<p>Erik H    00:33:36    And if we don’t have a, and my suspicion is that if we had a lawful way to do this, it would become very apparent that most of the brain’s kind of architecture and design is in instantiating is based around instantiating. This, you know, basically stream of consciousness, which is kind of this dominant information flow within the brain and that everything is kind of subservient to it in that it’s feeding it or manipulating off of it or so on. And that this would be very, um, this would probably be, uh, a big paradigm shift just in the sense that a lot of previous results would maybe look pretty incommensurate. And the reason why I say that the goal is consciousness rather than behavior, is that, um, you know, certainly I think that you need behavior to develop consciousness, you know, and all these things. But I do think that it is true that most of the time for conscious experience, a lot of the world just does not actually filter as directly into behavior.  </p>



<p>Erik H    00:34:46    In other words, it takes an immensely convoluted route, you know, in the end, we’re all Darwinian. And, you know, the, the reason that, you know, organisms survive is because of the way that they behave, right? Like that, that is in controvertible. But I think that there’s a very, uh, a view that I don’t agree with, which is that consciousness is very minimalistic or almost epiphenomenal, or you’re only conscious of a tiny, tiny top of the iceberg and everything else is going on. I think that that’s wrong. I think probably, you know, um, even organisms that we think of as having not very complex behaviors are, might have a very definite consciousness. And that, that is the thing that much like for ourselves that feels like it’s kind of in control and so on. And so, um, you know, this, this view is, is, you know, in my, in my opinion, not quite, um, as popular as it should be.  </p>



<p>Erik H    00:35:45    I mean, and I would say that as a bit of an understatement, because if it’s true and I’m not, you know, what one could argue against it, but let’s just assume for a moment that it is true. Then the question of lawful the question of lawful relationships between, you know, neural, um, uh, you know, neural states and experiences, it becomes incredibly powerful and probably would in my, and probably sweep away a lot of contemporary, you know, neuroimaging or the way that neuroscience has done or so on. So that’s why I stress this consciousness as primary approach, because it just is a change in it’s just so different or at least somewhat different than how, you know, this, maybe the staff, maybe the average neuroscientists, although that’s very hard to figure out, right. Like what the average neuroscientists view is.  </p>



<p>Kevin M    00:36:30    So I would agree in one sense that first of all, that we don’t have any good, good theoretical grounding of consciousness really, or there’s hardly much good theoretical grounding in neuroscience, generally, it’s, it’s kind of theory free. Really. I disagree hanging on. I disagree. It might be good theory free, but there’s a lot of theories. Well, there’s a lot of theorizing. We don’t really have a, there’s not a, you know, and this goes for biology overall, actually we have one good theory, which is the, the theory of evolution by natural selection. That’s our bedrock and everything else around, you know, beyond that is, yeah, there’s loads of theorizing. Uh, but I don’t think we have a solid framework. We don’t have a standard model, but I’m not, I don’t, I don’t mean to be dismissive of the work of theoretical neuroscience is just, hasn’t gotten there yet.  </p>



<p>Kevin M    00:37:20    I, I actually think it’s really important to be work to be doing. Um, but I would say, so the way that I think we’re going to get to, um, a principled understanding of consciousness might be, uh, in, uh, in, in following an evolutionary trajectory to say, okay, well, if we think of nervous systems, as, as you know, they’re two as control systems, basically, um, then what does consciousness get you in that context? How, you know, how, how does it better, uh, allow an organism to control itself by having that kind of a subjective experience of something going on? And when we get to humans, I think the idea that we’ve got what effectively is metacognition, you know, the, so that we can, it’s not just that we’re having a conscious experience it’s that we can introspect about our goals and beliefs and desires in a way that gives us actually some control over them.  </p>



<p>Kevin M    00:38:18    Um, I think that’s, um, a powerful sort of maybe addition to, um, to the arsenal that an organism can bring to bear in terms of maximizing its agency and autonomy over time. But, you know, when you’re thinking about, say a fruit fly and I agree that a fruit fly probably has some kind of fruit fly consciousness, then, you know, the question is what does it, what does that feel like? But also what does it get the fly above and beyond the call, the control systems that are already built into its nervous system. Why does experiencing something help there? And I don’t, I don’t know what the answer to that is.  </p>



<p>Erik H    00:39:01    Well, what’s so funny is that if you ask a purse, if you asked like an average person on the street and you ask them, like, what’s the point of consciousness, right? They’ll give you a very functional definition based off of things like, well, uh, you know, if something hurts, you’ll move away from it. Right. And if something feels good, uh, you’ll do more of it. Right. And things like that. And, um, you know, to be honest, I’m not entirely convinced, we’ve even been able to supplement that in, you know, the sort of artificial brains that we make with things like deep learning, right? Like we’re kind of cheating where, where we’re, we’re kind of, I mean, we were basically just doing calculus about the input and output and figuring out, you know, what, what, what inputs you need to get to the output that you want.  </p>



<p>Erik H    00:39:48    That’s obviously not how biology is doing is doing this. It’s doing it off of somehow conscious experiences. Like don’t, don’t ask me how, or at least that’s what it appears to be. And so kind of there is this very naive functionalist approach, which is just that, you know, organisms have experiences. They have some sort of spectrum of, um, you know, affect along those experiences. And then everything they’re doing is kind of trying to maximize or minimize their, their affect along them. And that is I think, a viable read and might at some point reassert itself as a very viable read, if you got a good, you know, lawful relationship theory,  </p>



<p>Kevin M    00:40:34    I think the only problem with that, Eric is that the, you know, you can build a lot of those sort of control systems into, into robots. For example, you know, you can make a robot, uh, withdraw from things that are damaging and you can make a robot go towards things that are rewarding. You can build reward and punishment into, into the circuits. And of course in, you know, and when we look in animals, you know, we can do these, these experiments now in, in mice or monkeys or other things where we’re tweaking those circuits. And we’re literally changing, not just, you know, not just controlling what the animal is doing, but controlling its cognitive states controlling what it’s, what it’s thinking basically. Um, but it’s all, you know, you th the, the counter argument would be to say, well, okay, all that machinery is there, right?  </p>



<p>Kevin M    00:41:18    You’re, you’re, we’re going to send a reward signal that we’re going to send a punishment signal. The circuits are going to interpret it this way or that way. Um, and the question is, why does it, why does it help for that to feel like something as like, if you build it in a robot, is the robot to experience that, is that the, you know, where’s the, where’s the magic sauce coming from, um, that, that converts the neural processing into a conscious experience. And why does it help, you know, is it just that it’s broadcasting the signal to the whole nervous system? And that’s just what that feels like? Is it, does it inevitably feel like something, or does it add something functional to the, uh, to the mix?  </p>



<p>Erik H    00:42:01    Yeah, let me maybe push back a bit on that initial assumption that we do have very good models for how it, for how it works. So, so one is, I would say, you know, maybe for some very, you know, maybe for, like, C. elegans, you know, or something like that, we have very good neural models. The moment you get into something like mice, I honestly do become somewhat skeptical of, you know, a lot of, kind of the contemporary approaches. And now I know that that sounds like me just like throwing out the whole thing, but let me just give a, give an example of, I don’t quite think that if I, when I have like a Roomba, right? Yeah. My Roomba’s doing various things that look like it’s trying to avoid or go back home, you know, or so on. And I think that it could very easily be that we’re kind of fooled by this where we think, well, this is a very reasonable explanation for like an animal’s behavior where it has some sort of almost like pre-programmed algorithmic, you know, um, uh, very simple thing inside of it.  </p>



<p>Erik H    00:43:07    That’s, you know, going to be governing it basically, you know, in the same way. And you can say, well, maybe that’s almost like a bit of an illusion, um, which is, you know, and that actually what’s going on inside an organism is like vastly more complicated and somehow rooted or grounded in the actual experiences. It’s having an, a Roomba just kind of apes that, and, you know, well, we can open up the Roomba and do experiments and we can open up a mouse and do experiments. Maybe they’re just actually kind of a fundamentally different natural kind. And you, you could have a whole behavioristic science of Roombas and our current techniques would work very well. But if you did it for mice, maybe the current techniques would only kind of seem to kind of work well enough that you can get publications and, you know, do various other stuff, but they’re not actually super effective as like a view of it. And again, this is, I’m not saying that this is a hundred percent true. I’m saying that I don’t find this view represented enough, just a really kind of like hardcore consciousness, first view of things. Wow.  </p>



<p>Kevin M    00:44:12    I mean, I think so. I wouldn’t necessarily, I guess I’m very sympathetic to the idea that the, uh, the approach I just described there of, you know, digging into the, the, the neural circuitry of, of animals and tweaking this way in that way and showing that you can control the behavior. Uh, I, I don’t think that gives us understanding necessarily we can get control without understanding of what the organism is doing. And I think the reductionist sort of approach really mechanistic is just fundamentally wrong. I think these are, you know, we have to think of these as, as organisms in, uh, in concert with their environment as, as ongoing processes, really that have a kind of causation that’s not just instantaneous and mechanistic. And that, that sort of view is not really very, um, very popular. And I see in a really vague sense how consciousness will somehow fit into that kind of model better than a mechanistic one. But I mean, I don’t see exactly how obviously if I did, uh, we’d have a much better understanding of what consciousness is and what it’s for, then we, then we currently  </p>



<p>Paul M    00:45:20    I’ll bite on, on Eric’s radical statement. I think that you like loaded up the crazy train and headed out by, by, uh, going with the, uh, I’ll just say something radical to, by, by going with a consciousness first approach. I mean, because there are so little evidence that, uh, con well, there’s so much evidence, so here here’s my here’s, here’s my pushback, I suppose that, um, I think there’s more evidence that consciousness is the tip of the iceberg. Whereas you were saying that there’s more of the iceberg in consciousness than, than we appreciate in, in academia. I have that. Right, right. Yes. And, but the, but the evidence I would suggest, um, is, would be in my favor, I guess, or what you’re saying is like the, the rest of academia, academia is favor that the vast majority of our processing is unconscious. And we have, uh, for me, uh, I’m, I’m at home at ease with the fact that I have a very low dimensional, very minor connection to the rest of my processing. And, you know, that sliver of freewill that I don’t really have, but I think I have is, uh, just, I just barely, I’m barely conscious, essentially. I’m barely aware of like what I’m doing and not in control at all, which is not a great feeling, but I’ve become at home with it. Um, so, but, but I think that the vast majority of, uh, research would support that notion that it is just the tip of the iceberg.  </p>



<p>Erik H    00:46:49    Yeah. So, so, so I, I disagree. I think that that is a complete Freudian holdover and that Freud’s immense. I mean, it just is very difficult to overstate Freud’s immense impact on ways of thinking from about 1930 to 1970, I mean, his complete and utter domination of the intellectual scene and this idea that, that humans are driven by, you know, mainly on sub sub or unconscious processes and, you know, Freud’s theories, specific theories have now been, you know, pretty much round, you know, roundly. Um, yeah. But his, his, his focus on the unconscious and the minimization of consciousness is I think still incredibly predominant. I don’t agree that there’s any good evidence. I mean, as far as we know the only complex agential behavior that biological systems demonstrate occurs when they are conscious, you know, there is very, very few or rare cases of somehow loss of consciousness and complex behavior.  </p>



<p>Erik H    00:48:00    And they’re very questionable. It’s, it’s completely, you have to separate between whether or not you are conscious of all the information flowing in your brain, which is not at all a position that’s even worth kind of discussing and whether or not in terms of a consciousness first approach, whether or not the stream of consciousness is completely necessary for the complex behavior that the organism is demonstrating. So, and these are, these are two different things, right? And this is something like, you know, your computer would be kind of non-functional without this sort of very nice top-level user interface kind of baked into it. So, so even if it’s the case and now the computer metaphor, isn’t perfect because, uh, you, you know, so, so let’s take it as it is, but like, even if it were the case that you could claim that consciousness is relatively high level within the brains, whatever term you want, representation, information processing either like poorly defined terms that neuroscientists throw around, um, you know, whether or not the, the brain is very high level, it doesn’t really matter because what matters is if you take away the high level, does the whole thing just collapse.  </p>



<p>Erik H    00:49:10    And I would say both from a, from a naive perspective of like, just people’s understanding of other people’s psychology and their own psychology, this is true. And also there has been no good disproof and, and most of the stuff are things like working memory papers that I think are kind of poorly, poorly generalized and, and things like that. So, so I actually do disagree about the predominance of evidence.  </p>



<p>Kevin M    00:49:32    Yeah. So that’s interesting. Cause I, um, I would be more, uh, uh, along Paul’s line here, but I think there’s a, there’s a question I guess, about Eric, when you’re talking about consciousness, whether, you know, because it means so many different things. Right. And if it sounds like what you’re talking about is that it’s, it’s just, the organism is online basically that it’s been booted up and it’s active. Right. And, and as opposed to the question of whether it has to be having subjective self exp self-aware experience and be aware of itself experiencing things, um, and, and to me, I’m happy to buy the idea that the whole system has to be working for, uh, the organism to behave as it normally does. Although I think that would look and feel very different for organisms of different level of complexity.  </p>



<p>Erik H    00:50:22    Yeah. I, I certainly agree about the, the looking and feeling too to some that w w w the only thing, I mean, by conscious experience is a, is, is the Jamesian sense of a stream of experiences, which are qualitative. So the, what it is likeness. And so, and, and, you know, if you’re, if you’re, uh, if you’re giving explanations about people’s behavior, what you’ll find is that you talk entirely in terms of their conscious experiences. Now, sometimes you might say something like, well, they unconsciously wrote a letter or so on. Now the standard, by the way, just to, just to say this, you know, I think the standard reply that I’ve heard from this is that eventually it gets down to people saying something like, well, you know, when you add up numbers in your head, do you really do the addition or are you kind of handed the solution? Right. And I think that that’s actually a reasonable, uh, uh, take, but it doesn’t mean that you could actually do say, um, many of the things that you require consciousness to do unconsciously. This is the false.  </p>



<p>Kevin M    00:51:29    Yeah. Okay. But if we maybe frame it in a different way and ask about, uh, you know, decision-making and action selection, then I think all the evidence is really clear that most of the decisions, sorry, most of the actions we take, we’re not consciously deliberating about them necessarily, uh, in a, you know, in a, in a, cause it just takes too long. We’re on autopilot for most of the things that we do most of the time, because we have these constraints built in to our habits, to our heuristics, to the policies, you know, decisions that we’ve made before, about what we will do in the future, um, which is all good and important because otherwise we’d take so long deciding everything that we’d be we’d have been dead long ago. So, you know, in terms of, sorry, go ahead. Go ahead.  </p>



<p>Erik H    00:52:10    Oh, I was just going to, just for that very specific example, I would say, I think that, again, some of this stuff is just linguistic, so, um, you know, your, your, your, your point is absolutely correct. Right. If we, if we cognitively kind of, you know, made high level decisions of what we think of as consciousness, but I th the trick is that I think that that’s a very different use of the word consciousness, as simple example might be like, let’s say, I reach for a glass, you know, a reach for a glass of water. Now you might say, well, did you really deliberate consciously about whether or not you would reach for the glass of water, you know, before you reached, right. And of course, everyone would say probably not most of the time. Right. And then, you know, I don’t think that one can, at that point claim victory at all, because what you can say is, listen, I kind of felt like this urge, like this whole, this whole thing played out within the activity of my stream of consciousness. I saw the water, I had an urge. I wouldn’t have really analyzed it or described it. I wouldn’t have spent much time on it. And then I, you know, consciously reached out, grabbed it. Everything is again, operating in the theater of my consciousness. And then I took a drink and I experienced the, you know, the sweet, cold liquid. Sure. That what I mean,  </p>



<p>Kevin M    00:53:24    But there’s lots other situations  </p>



<p>Paul M    00:53:25    That are even less than that. Can I give an example because I burned the hell out of myself two days ago with coffee. Um, so I have this, uh, this is gonna sound highfalutin, but I, I, you know, I put butter in my coffee or whatever, and you gotta mix it up. So I have like this aerator thing. Oh, stop grimacing, Kevin it’s. Okay. It’s, uh, it’s intermittent fasting anyway. So you stick this little wand in and it spins really fast. And I had, and kind of like aerates the coffee, mixes up the, the butter and stuff. Um, it’s unsalted butter. And I had heated the coffee hot enough so that when I put the butter in it, wouldn’t, wouldn’t get cold. And right when I started, um, aerating and I pressed the button and it goes is, and for some reason that it kind of blew up.  </p>



<p>Paul M    00:54:08    I don’t know, it was, it’s a butter reaction or something with a coffee and in that. So, so my experience was nil, but what happened was I managed to, uh, take the aerator out. I did actually accidentally throw it across the room, but nothing was broken. Um, and, and at the end of that experience, I was thankful that my body took over and performed these complex actions without me being aware of it. And you sort of like come to and realize, oh, I just did all I, I made, you know, a lot of degrees of freedom movements and yes, uh, the air, the, the little wand is okay, but I did have to pick it up, you know, cause I, my body did kind of, what I’m trying to do is, is take it away from reflex and say it was more than reflex because I was making a complicated movement. Uh, but it was all unconscious, all my body. So just to throw that example as, I don’t know if Kevin you’re going to  </p>



<p>Kevin M    00:54:58    Yeah. Look, I mean, I think, um, again, there’s lots of things that we do on complete autopilot where we’re not even necessarily conscious of what we’re doing, uh, at all, you know, you can be driving along and, and just, uh, you know, drive to the wrong place because you’re not thinking about it at all. You’re, you’re, you’re just really not devoting much cognitive resources, uh, to that task at all, except a kind of a surveillance for, you know, danger and traffic signals and stuff like that. So, um, I guess, I mean, it’s sort of appealing to the, uh, system one system two, um, you know, dichotomy or, or a spectrum maybe, um, of, of Daniel Kahneman, but in terms of decision-making, uh, you know, I think the, the things that we do consciously and deliberatively at least are a small subset of everything else, but, you know, maybe it’s maybe it’s, uh, maybe it’s semantic.  </p>



<p>Erik H    00:55:54    Yeah. I, I find it incredibly telling that the clearest example you have is basically  </p>



<p>Paul M    00:56:00    That’s what I was saying is, is that it was beyond before  </p>



<p>Erik H    00:56:03    That I agree with, right. So, so it’s very telling that, right. We, we immediately go towards something that we actually do all agree would be a kind of classic unconscious activity. Right. We should like putting your hand on a hot stove. And let me just say that, even that, even that, right, like even it, particularly in your example right now, if you didn’t already have like the theater of consciousness set up such that you knew that kind of broadly available information about where your body is, where this thing is, all these things, and then yes, maybe you do have something that happens, you know, so quickly and kind of neural timescales that you’re only going to experience it once it’s, once it’s over, but it’s still making use of all that information that you already had. So even in that case, I wouldn’t be willing to say that your conscious, your stream of consciousness had no kind of significant causal impact on, on what you did.  </p>



<p>Erik H    00:56:54    And if even that case, I feel like one can kind of possibly defend it. Then I think that when we get to more complex cases, it becomes more and more obvious. And again, I think that some of this stuff, you know, as you say, Kevin, it is semantic, right? Where one has to say a precisely what one means. But I think by focusing on this notion of a stream of consciousness and the degree to which that information is used right, and necessary, then one gets, I think, closer to kind of both, interestingly enough, closer to the naive perspective, which is actually probably, which is that, you know, human beings are experiencing these things and they’re just basically reacting based off of what they’re, what they’re conscious of. And, you know, they have their, their various, you know, hedonic, uh, pleasures and pains and so on. When you get closer to, when you take this view of brain function, you also get a lot closer to the naive view. And I find that is probably a good thing and good evidence for it.  </p>



<p>Kevin M    00:57:53    So can I, can I push back Eric, just on the, um, well, I want to roll back in time as it were evolutionarily. So if we imagine just an amoeba say, and it’s a schmoozing around the place, and it’s getting some signals from the outside about where say some bacteria are that it could eat or where other amoeba are to form, uh, a fruiting body or something like that. So it’s got, it’s got information in that. There are, uh, you know, it’s got receptors on the surface, they’re sending a signal internally when they bind some chemical. There’s a, therefore there’s a pattern inside the organism that has relative information, right. It’s physically correlated with something out in the world and it’s configured in such a way that it responds to that by say approach or avoidance. So my question is, I guess, where is it experiencing any of that is an amoeba experiencing any of that. And if it is then then fine, you would say that’s a kind of a proto-consciousness, then that’s fine. I’m happy with that. But if you think that it isn’t, then at some point in evolution, consciousness arose where it wasn’t there before, and then, and then literally its behavior first consciousness, second as an, as an add-on in, in evolutionary terms. But I’m wondering, what do you think about the, the amoeba if it’s having an amoeba experience?  </p>



<p>Erik H    00:59:22    Yeah. Um, I would say that’s dependent on the final theory, right? So this is the boring answer, right? Like this is the boring answer I can give is that, you know, when consciousness arises, uh, is basically just a function of whatever this law, this lawful relationship, you know, is. And, uh, I don’t know what the lawful relationship is. Uh, unfortunately, you know, I wish I did. I, I can give some, some guesses and we could talk about like, you know, uh, uh, maybe, uh, uh, well, at least one of the bigger theories, which is like integrated information theory, uh, which is I think, um, you know, maybe it’s not well accepted, but it’s kind of well known and well discussed. And there are empirical studies that do support it. Like I, that one should be clear about that. There are, are some good studies, um, although I don’t think it’s correct.  </p>



<p>Erik H    01:00:13    Um, but yeah, I, I, it’s reasonable to say just because I worked on it, so we should have, my, my personal opinion is probably that it’s not the final theory, but it’s, it’s a, it’s a good theory. It looks kind of like what a theory should look like. And, uh, so, you know, so, so, so that, I, I can’t quite answer it, but maybe I could just say that, um, you know, the, the gap here is probably a bit bigger than people think in that, depending on what the sort of lawful relations you end up with, it could be very much that consciousness is totally epiphenomenal and only looks to be, you know, it just so happens, right? That we experience something very unpleasant, right? When we are supposed to be acting like we experienced something very unpleasant, um, you know, or it could be maybe very directly causally impactful.  </p>



<p>Erik H    01:01:11    And I think for those sorts of questions, you know, we, we need, you know, theories of causation. You need theories. I know Kevin, you’re interested in like theories of emergence and, um, and, and theories of consciousness. And I think that those things need to be resolved like my own personal preference to be completely honest is that I think that emergence is very solvable scientifically. I don’t think that there’s anything in there, even in the most, even taking very seriously everything that philosophers talk about, like classic analytic philosophy about emergence, even taking all that stuff. Let’s just take it very seriously, even then I think it’s completely solvable. And that feels very different to me than like consciousness where, uh, you know, I have some ideas, but, uh, to, to, to be honest, I don’t have any of the same certainty that anyone’s gonna crack it, you know, in the next dozen years. I mean, maybe this is an opportunity to talk about, uh, emergence and some of the stuff maybe we do,  </p>



<p>Paul M    01:02:07    Let’s, let’s go because I want to make sure we have enough time to go down that road. So, I mean, Kevin, one of the reasons why you’re here right now is because you’ve been working on, um, the evolution of agency and, and the essentially of agency and Eric, you have a host of recent publications talking, uh, examining how you can use information theory, um, concepts to show that you can get more there’s, there’s more in the macro level than in the micro level. You can show that mathematically in these certain systems that you use as examples. Uh, so yeah, I mean, I just kind of introduce those topics because I just want us to go down that road of, you know, so maybe I’ll start with here, here, Kevin, I’ll quote you in a recent, uh, talk here. He’s a patterns of neural activity have causal power solely by virtue of their meaning. Okay. So where does meaning come from? Let’s go down this road.  </p>



<p>Kevin M    01:03:05    Well, yeah. Great. Good. Okay. So, um, yeah, I mean, that’s, that’s really a pushback to the reductive view that in fact, you know, as we learn more and more about how neural mechanisms work and, uh, seemingly underpin things like desires and beliefs and goals, and so on the less work there seems to be for the desires and beliefs and goals themselves to be doing anything right. Th th the mental content doesn’t seem to have any efficacy there. It’s just like, well, look, the neural circuits are firing that way, and they’re configured like this, and this is what’s going to happen. So what’s all this other stuff is just epiphenomena. And I think there’s a, there’s a strain, at least in modern neuroscience. That’s, that’s quite reductive in that sense. Um, and I think it’s absolutely backwards. I think that those patterns of neural activity only have any causal power in the system because they mean something.  </p>



<p>Kevin M    01:03:58    So the system is configured in such a way that the patterns, uh, refer to something, represent something, reflect something, either out in the world or, or internally that is, um, tied through either evolutionary wiring that’s that does that sort of packed this program for configuring the nervous system a certain way into the genome or through the individual learning of, of the organism through experience such that the, um, the outcome is, uh, you know, um, appropriate for the, the organism and in, in light of the goals that it has at any moment. So, you know, that’s a long-winded way, I guess, to, to say that the organism is behaving as an agent, because it has purpose built into it by natural selection and the simplest things, uh, you know, single celled organisms, their purpose is just to persist, but that’s how natural selection works. Things that persist, persist and things that don’t don’t.  </p>



<p>Kevin M    01:05:01    So we see the ones that do so that tendency is just wired in there. Um, you know, starting with biochemical circuits that, that maintain themselves going and, and, and then moving to, um, behavior when animals can sense things in the world that it is adaptive for them to, or avoid. And then that behavior gets reinforced over time. So in that sense, the meaning is, first of all, a pattern of information that’s correlated with something. So that’s just the, there’s nothing controversial about that. Lots of things are just physically correlated with each other. Um, but secondly, that it has some relevance for the organism in terms of its goals. So it, in terms of the purpose that it has, so it has some, some salient and some, um, and some importance, and it’s linked consequentially to some kind of action.  </p>



<p>Erik H    01:05:53    Yeah. Kevin, maybe you could say something about, um, like this, the very first thing you said right. Which was kind of framing this, which is the reductionist drive within neuroscience. Um, w how do you find it, like almost overwhelmingly common? Um,  </p>



<p>Kevin M    01:06:13    Kind of, yes. Uh, I think it’s, it’s not always explicit. It’s often just sort of implicitly there, the idea that we can, first of all, go, you know, w we can break down behavior to the circuits, the components that are driving it, and we can control them with these amazing experiments. And that sort of reinforces the idea of a mechanism at play that we can intervene on and, uh, and drive and manipulate. But then, you know, below that, there’s a, there’s an even deeper sort of idea that actually, you know, it’s the circuits, that’s all illusory. It’s actually just atoms and molecules. And it’s the laws of physics. You know, it’s a really deterministic view from physics. And this comes up in, in, in debates about free will, where people say, well, look, it’s all determined by the laws of physics. You’re just made of atoms.  </p>



<p>Kevin M    01:07:03    The atoms are going to do what they’re going to do. What does you having a thought have to do with any of that? Nothing. It couldn’t possibly have any causal power, um, in the system. And I think that’s, uh, I think that’s completely wrong. It comes from a really impoverished view of causation, which is all basically bottom up and it ignores the causal, uh, potential of organization, which is a trivial commonplace sort of thing. Like we know that organization has, uh, you know, how some causal influence on the way that things happen, but people bizarrely seem to just reject that idea. And I can’t really understand why it seems to be so controversial.  </p>



<p>Erik H    01:07:49    It’s actually quite funny. Um, because most of the time, the people who are the most reductionist are often not always, but often, um, you know, all their science in, in their particular science, its elements occupy some from very high up place in the spatiotemporal hierarchy of physical objects, right? This is something that I’ve talked about a lot and have been to bring back as like a really big issue. You know, if you say science is about reduction, you have to explain the fact that there are very, there is a huge diversity of scientific fields, which all take various, um, high-level components as units of function and causation. And, uh, I’m not the, the by, by any means the first person to say, there’s probably, I think one of the best statements of this comes from the 1970s by a philosopher, Jerry Fodor, um, who wrote something called like the unity of the special sciences as a working hypothesis, but his, his whole, the whole paper.  </p>



<p>Erik H    01:08:49    Um, and I think Jerry Fodor was probably one of the better analytic philosophers of the 20th century is basically just saying, listen, you can’t be a universal reductionist and still believe in science because science contains all sorts of things where people just sort of stop at some particular level of description, and then they like don’t want to reduce any further. And then his, his question is like, why don’t you want to reduce any further? And he doesn’t provide an answer to that, but he does point out that it’s, it’s a really big problem. You basically have to dismiss your own field and just say, well, if we could, we would. And that’s what I very much have been, trying to show is incorrect. And try to like prove is incorrect using simple, clear models because, um, you know, the moment you start talking even about physics in general, you’re talking about very complex systems that are difficult, but if you can show it in, say, cellular automata, then you can be reasonably sure that it’s true, uh, you know, in the real physical world.  </p>



<p>Erik H    01:09:48    So, you know, all my research has basically been looking at things like cellular automata or like small, simple, you know, Markov chains and simple systems like that. And then, um, asking questions about, say, you know, causation, when you move through different different levels or scales, and, you know, you find some very surprising and concrete effects, which is that two macro states by which I just mean summary dimension reduction in micro states can have a stronger dependency than their underlying micro states. I mean, it’s almost like so trivially true that it sounds absurd, but as it is very simple example, imagine I have 10 micro states and those 10 micro states all transition to one another, depending on what state you’re in. So it’s just a Markov chain, just 10 states. And let’s say the first, you know, uh, three states, two, two of the states all are equally likely to go to each other.  </p>



<p>Erik H    01:10:44    So a and B, if you’re in a, you could go to a, you could go to B equally likely 50, 50 chance, right? If you are in any of the other eight states, you are equally likely to go to one of those eight states. So you just bounce around, right? It’s like this. It’s like, if you’re in, if you’re in one of these eight states, you just bounce around between these two states, right? If you’re in these two, two states, you just bounce around. Okay, well, let’s think about some of the conditional dependencies, which was the traditional way to talk about causation in that system. And what you will quickly find is that the macro states of that system, if you just say these latter eight states are macro state, you know, gamma, and the first two are macro state alpha or whatever, then you’ll quickly find that macro macro state alpha has a deterministic transition to itself.  </p>



<p>Erik H    01:11:31    And the other macro state also has a deterministic transition to itself, even though the micro states are essentially choosing at almost random. So there there’s almost no like causal, the causal relationships of the micro states are immensely noisy, but the macro state relationships, while there are fewer macro states, they are deterministic. They’re not only deterministic, they’re such that you can retrodict too. So now I have perfect prediction and perfect retrodiction, which in my research, this is called determinism and degeneracy. So you, you have, you have perfect determinism and zero degeneracy, and you had a lot of indeterminism in your micro state model. And my argument is that, um, you know, if you’re, if you’re, uh, if you want to choose between your models of the system, both are equally valid descriptions of the system, but one of those actually contains two strong dependencies. And the other one contains just a bunch of totally weak, noisy dependencies.  </p>



<p>Erik H    01:12:27    And according to various metrics of causation, they can be higher at this, uh, macro scale. And I think that a lot of the time, you know, it’s not just that what you want it, what you want to show to argue against like the universal reductionist position is not that somehow you can’t do this reduction. Of course you can, because what philosophers call supervenience holds. But if you do do the reduction, you lose something and quantifying that loss is what I’ve been been after. And we’ve, I’ve kind of proposed various measures by which one could do this. I happen to think that the latest one, which is just using the mutual information, which no one can argue with the mutual information, which is why I love it. You can’t get out, there’s no escape from the mutual information. If you show something into mutual information, you have to believe it.  </p>



<p>Erik H    01:13:15    So know, just even showing in the mutual information that you can lose, you can break down into mutual information to three different types and you can lose different types of information. When you go down in scale, you can transfer from synergistic to redundant information by reduction. And so what this shows is that it’s not just about compression, right? It’s not just that, yes, everything should be all our models of the world should just be physical models, but of microphysics, but we just don’t have a big enough supercomputer. Like we’re not Laplace’s demons. So we can’t do it. It’s like, no, Laplace’s demon has a provably bad low-information model of the world because he doesn’t have good dependencies. The world that he sees is just completely indeterministic. And it’s just, it, there’s no boundaries. And the causal relationships are, are, are poor and uninformative, and are all the information is redundant. It’s not synergistic or unique. And, you know, when you think about it like that, I think that you can make a strong argument that you, that, uh, higher-level scales, you know, you, you can talk about it philosophically that they’re like ontologically real, but certainly I think just talking about in terms of method and when we’re, when one should or should not reduce is probably the best way to talk about it.  </p>



<p>Kevin M    01:14:28    Yeah. Yeah. I mean, um, I agree, and, and I really appreciated your, your work on this because it, it, I think really clearly shows that, uh, it’s not the case that like, you know, someone like Sean Carroll, for example, the physicist talks about, um, you know, these, these other levels of description, higher levels of description as kind of convenient ways of talking about things. But the real truth is all down at the low level. And if we have the full picture of the low level, then we would have all the information that’s there. And the higher levels would emerge from in a very simplistic kind of sense would be completely determined by all that what’s happening at the lower levels. And of course, in an instant, uh, they’re instantiated in the low level organization, but that’s not where the explanation lies for how the system evolves.  </p>



<p>Kevin M    01:15:20    And we have to have the right level at which to, um, to interrogate that and to understand it. And I think actually neuroscience these days is catching up with the idea that, uh, in any field of cells, for example, you’ll have some pattern of activity, but it’s got, you know, tons and tons of different possible micro states. And it actually is a trajectory through a sort of a low dimensional manifold. And depending on what the trajectory is, that’s what the meaning of that state is. And then what happens in the system is based on the configuration of the filter, the set of synaptic connections, for example, that that manifold goes through. And so it can be configured in such a way that if it’s manifold a, this happens and if it’s manifold B that happens. And all of the low-level information is just lost. It’s, it’s, it’s, it’s too noisy to begin with, but it’s actually actively filtered out by, um, by the nervous system.  </p>



<p>Kevin M    01:16:19    So that’s what I mean, Paul, what I’m saying, the nervous system is, is running on meaning there it’s meaning it’s meaning that drives the mechanism and the meaning is inherent in the configuration of, um, of the, the, the circuitry. And it’s been put there, it’s been packed in there by evolution. So we have to think of it in and by experience. So we have to think of causation, not as this purely instantaneous sort of thing that just doesn’t capture why the system is the way it is. There can be some instantaneous trigger, but the structure of the system has causality packed into it. That is just often kind of ignored or seen as non non-scientific. It was almost banished by, you know, Francis Bacon, who was just saying, well, look, we’ve got just the efficient cause. Uh, and the, the material cause, and we’re not going to talk about formal causes and, and final causes in our Aristotle’s, um, framing. And I think we’re still suffering from that, um, really narrow reductive, uh, both in, in space and level and time, uh, view of causation.  </p>



<p>Erik H    01:17:33    That’s really interesting, Kevin. So, you know, just in terms of like this notion of meaning, um, you know, I’d be very interested in seeing some sort of, uh, you know, connection between, you know, cause meaning is kind of, you can think about it as like this very, very high level phenomenon, right? So it’s, it’s kind of like, almost like what happens if you dimensionally reduce enough. Right. Of course, you know, in a sense we are saying something a bit different, right. So, you know, you’re, you’re focused on this notion of, of meaning and the different sort of notions of, you know, causation, you know, like the classic Aristotelian. And I do think that that’s a really interesting way to talk about these issues. And, but I want to be clear. I don’t, I’m not, um, I don’t, I don’t know to what degree it will kind of fully like I have not actually studied it.  </p>



<p>Erik H    01:18:25    Right. So, so I can’t speak to like the, the, the truth or falsity of it. Um, but I think it’s a very interesting approach, um, because I do agree that people take causation as this obvious primitive, like, and it’s not right. Like you have to specify what you mean. Right. And what you’re saying is like, well, okay, well, let’s be very serious about what you mean. Um, in your, in the example you gave of like the, uh, you know, someone like Sean Carroll, right? I mean, I think it’s when somebody says something like all the information is at the bottom, right. So that is directly provably wrong. So, and, but one has to be subtle when it has to be very subtle. So it’s, it’s provably wrong because the type of information that one is probably talking that someone’s probably talking about when they say that is something like the, like the Kolmogorov complexity or something where they’re like, yeah, like, listen, like the, you know, if you were to run a Turing machine that was running the world and you ran it at the level of microphysics, it would, you know, the program that you need would be way longer than if you ran it at some higher level of scale.  </p>



<p>Erik H    01:19:28    Right. And that’s like, obviously true and totally irrelevant to any discussion about the causal powers of higher level scales. What instead you have to look at is, you know, like a measure of causation or a measure of information, and you have to specify what measure you’re talking about. And, you know, like for example, in the, in the latest research that we did, we were using the mutual information, which you can break down into redundant, unique or synergistic components. And what we showed is that yes, you can, you can have the same mutual information at the lowest level scale, and at a higher level scale, but at a higher level scale, that mutual information, all those bits are synergistic. And at the lowest level scale, all those bits are redundant. Okay. So choose between your two models, right? You can have a model where everything is just redundant bits, or you can have a model where all the interactions are synergistic and that synergistic effect disappears when you reduce it.  </p>



<p>Erik H    01:20:27    So I want to be clear that as synergistic information disappears when you do that. So when you say, if somebody says all the information is at the bottom, it’s like, no, it’s literally not. And so I just think that this has been basically a massive confusion of people who weren’t like sitting around with simple models and just like specifying what they meant, and then w and also a good confusion of reduction with like universal reduction versus like taking things apart helps you understand what they do well, that is very, very obvious, right. But also not completely true, because we obviously always stop at a certain point and declare kind of victory or declare that we have a good sub field of science here with these kind of primitive atoms of function and so on. So I, I really think that I hate to talk about free will, because I do think that consciousness comes in and is the wild card in this discussion. But certainly I think one could even just say, just talking about neurons versus atoms, that there is a way stronger claim that your actions are caused by the macro states of your neurons, than by the underlying micro states of the atoms that make them up,  </p>



<p>Kevin M    01:21:35    Which I think is totally true. Yeah. And I think a parallel way to argue that is, um, first of all, that I think physical determinism is just not the case, that there is real indeterminacy in the universe. That’s a whole, whole other discussion and it, uh, physics hasn’t agreed on it. Um, but I think it’s, it’s right to say that the low level details of any neural state or the state of all the atoms inside a single cell by themselves, don’t fully determine the next state. And, and, uh, so you’ve got some neural state, it doesn’t determine the next state and you also have this multiple realizability and that many, many different micro states can mean the same thing at a macro at a macro level, which is where that, um, causation comes in. So if the, if the low-level details, don’t determine the next outcome of the whole brain, then, you know, you can ask, well, what does settle the outcome and what I would argue and many others is that it’s, it’s the meaning in the macro states. That is what drives the, the system. And that’s where, um, and that’s where I think, you know, agency and ultimately what we refer to in humans as free will, um, can be found because the there’s some causal slack in the system, it’s not just reductive the high level things, drive it. And that’s basically what we mean when we say we’re deciding things, is that we’re doing them for our reasons. I was  </p>



<p>Paul M    01:23:05    Going to ask a naive question. And this is about from, from both perspectives, I think from both of your work, what role. So, so Eric, in, in your work, and again, this is naive, but when you make a macro state out of micro you’re, you’re making a move, but mathematically there there’s no cost to making that move. You just state it. And it happens, but in a physical world, in the real world too, to create a macro state, there might be physical Thermo dynamical penalty cost, right. To actually instantiating that in a physical world. And I’m, and this gets back to, um, and I don’t know if we need to talk about this more, but the idea of constraints and boundaries and just environment and the impact that the whatever system there is, the, the, the interaction with the environment and how that relates to, to the information and, uh, that, that you can glean from the micro and macro states and the emergence, whether emergence necessarily needs to take in, um, considerations from, from its environment.  </p>



<p>Erik H    01:24:09    Yeah. So just to clear up a confusion, there’s definitely no cost to like creating a macro state. I mean, like, as a very simple example, you know, when we talk about colors, right. Uh, obviously if I say, you know, my wall is a certain color, what I’m saying is basically that there’s some sort of macro state where, and if you kind of averaged together all the colors of my wall it’ll end up being this. I don’t really mean that every single, you know, like, like the, the, the finer and finer, you zoom in that that color is just completely consistently maintained, right. That would be the description of the micro states of all the colors. And then you could go even further and further, right. Uh, of all the states. So it’s just describing something at a particular level. And just to differentiate, maybe just to try to differentiate Kevin’s and my position a little bit, cause it sounds like we agree on so much, um, would be, you know, on, um, this, this notion of, uh, you know, for me, the notion of meaning does not yet have to come into play for many of these phenomenons.  </p>



<p>Erik H    01:25:12    Now, maybe it does come into play with something like a nervous system. As I said, I’m like open to this kind of interesting notion of like macro state, somehow gaining meaning. But just, if you look at like the macro state descriptions of a very simple system, they can be completely deterministic and therefore kind of constrained the past and the future to a greater degree than any of their underlying micro states. And it’s not a contradiction. So this is, this is a completely non contradictory thing to say. And I’ll give you a very simple example, which is, let’s assume that the macro state we’re talking about is your behavior. But again, this could be a very simple macro state. I’m just using this as an example, you know, I can predict what physiological macro state you’re going to be in, in about 12 hours, right. You’re going to be asleep.  </p>



<p>Erik H    01:26:02    So, you know, my, my, my, my, my guess off of your current macro state, which is like, you know, you’re awake and it’s this time, and you’re feeling like this, my current guess is that I’m going to predict with, like, let’s say a 90% probability that you’re going to be asleep and that’s going to be the macro state that you’re going to be in. Now, my, now you compare that to like Laplace’s demon, right. And you’re gonna say, okay, Laplace’s demon. Why don’t you make your prediction now about what their future state’s going to be. But here’s the thing I’m going to restrict you to talking about it in the language of microphysics, because that’s, what’s supposed to be so special about you, Laplace’s demon. So you gotta make this bet in terms of microphysics and Laplace’s demon takes in your entire micro physical state.  </p>



<p>Erik H    01:26:41    Right. And then I say, okay, which micro physical state are they going to be in, in 12 hours? Right. And Laplace’s demon says, well, listen, I’m going to give you a really big list of possible micro physical states, all with vanishingly, small transition probabilities. So I’m going to say, listen, you’ve got a one in a billion chance to be in this micro state, one in a million chance to be in this micro state and so on. Why? Because you’re getting hit by solar flare, you fly solar rays and like other, you, you’re an open system. It’s just impossible. So Laplace’s demon makes this big list and you say, well, listen, Laplace’s demon, like the dependencies between the current state and this huge list of possible state, the dependency here is vanishingly small, right? You’re just saying like, you’re, you’re, you’re basically not bear.  </p>



<p>Erik H    01:27:22    You’re only, you’re not even probabilistically sufficient, and you’re definitely not necessary for any of these states and so on. But my macro scale description suddenly I was able to get a really nice deterministic causal relationship. And as Kevin said, the, the world, and particularly at the micro physical level for open systems, even forgetting whether physics is itself deterministic if you take the full state of the universe, but we’re all open systems and you’re incredibly noisy. So, you know, you really are gaining something by talking about it in terms of macro states. And I think that that’s probably something that scientists are implicitly cuing in on when they find a nice field of study, because it’s like, oh, look, I can intervene on things here. And I get nice kind of deterministic responses. And, you know, my, my theory is basically that what you want to do is you want to maximize the determinism, while also maximizing this, the size of the model that you have. So you want to find that sweet spot where you’re as deterministic your, your, like your interventions to effects are that the conditional dependencies are as deterministic as they can. And you haven’t dimensionally reduced too much, you know? Cause you could just dimensionally reduce everything into one big macro state and declare victory. Right? So  </p>



<p>Kevin M    01:28:34    Yeah, the interesting thing I think is that that’s what the nervous system is doing and it has to, because all those components are really noisy. Um, and neurons are just not great information processors in isolation. And of course you go from, you know, you go from analog signals to digital and back to analog and, and you’re, you’re filtering all kinds of information and doing it and doing it really actively, you know, every time you go from one setup to the next that that neurons doing it’s may, you know, you’re performing some operations, um, on that, on that information either at a single synapse or collectively from one area to the next. So, I mean, in a, in a sense there’s a cost to that, uh, in that that’s the activity, the work that the nervous system is doing, but on the other hand, it’s far less work to do that than to try to transmit all the individual bits of information. In fact, it’s pointless to, to just keep transmitting the same information along a chain without, without operating on it or extracting any, as I would say, meaning from it,  </p>



<p>Paul M    01:29:38    I was worried this was going to happen. I have about 400 more questions to ask you guys, but, uh, the, I guess my questions are micro states and we’ll have to, we’ll have to, let me, let me ask one more question here. And I don’t know, um, if you guys will, will differ on this and then we can wrap up, um, so the question is, do we need to understand life to build intelligence? Um, and under, in other words is building artificial life, essentially a precursor to building artificial, a necessary precursor to building artificial intelligence. And my gut tells me Kevin, your work with understanding agents to try and understand how agency evolved would suggest that you do think that intelligence, presupposes, essentially life processes, homeostatic processes, and Eric, and I don’t know where you land, but maybe who knows if you guys will disagree or agree, but that’s the, that’s the question.  </p>



<p>Kevin M    01:30:34    Okay, well maybe I’ll jump in there. So it’s really, really interesting. And I think in a sense, it depends on what you mean by intelligence. And there’s a been a tendency, especially in AI, you know, to think of intelligence in terms of things like playing chess and, you know, sort of logical, really cerebral, um, operations. Um, when, you know, for most things on the planet, intelligence means behaving in a way that keeps you alive. You know, I think intelligent behavior is, is things that keep the organism alive. So if you think of it in that sense, um, you know, it’s, it’s goal directed behavior. Um, and of course you can do information processing and cognition that underpins it. But my own feeling is that if we want to get to something that we would say has a kind of autonomous intelligence, then we do need to give it agency really. I know we need to, we need to make it a, um, a living autonomous thing that it has some sort of causal insulation from the rest of the world. And, um, isn’t just being, you know, it’s not just a stimulus response kind of machine, uh, or something that’s doing information processing in isolation from any, from any real-world goals. I think we need to give it a to care. Uh, and the easiest way to do that is to make it care about itself because that’s what life does. What do you think, Eric?  </p>



<p>Erik H    01:32:09    Yes. I mean, I, again, hinging greatly on the definition of intelligence, right? So I think to get the sort of agential, uh, behavior that the average person would say, oh, this, this AI is intelligent or something like that. Uh, w most people wouldn’t even say that about Siri, right? Uh, like, like why wouldn’t they, right. Siri actually seems to know quite a bit. Um, they’d be like, well, Siri is not, like, animate, like Siri just sits there and gets queries and spits back answers, you know? And it’s, it’s really not what we think of when we, when we think about intelligence, but I would say I, I’m also a little bit, maybe just to, to finally bring this all in full circle, um, you know, the sort of techniques that people use to build these artificial intelligences and so on. Look to me a lot more like, like cheats than the way that biology does it, you know, just like you’re, you’re, you’re what you’re literally doing is you’re just rewiring the system until you get the answer that you want.  </p>



<p>Erik H    01:33:10    Right. And then you do that over and over and over and over again. And that’s what backpropagation is. And you know, it, it would be like if I, if you were taking a test and, uh, you know, you got, you got certain number of answers wrong, and then your teacher rewired your brain such that you would have gotten those answers correctly on this multiple choice question. And then they give you another test and you’re performing better on it. But what did you learn? Like, you didn’t learn anything, they just rewired your brain. Uh, you didn’t have the experience of learning. You didn’t, you don’t even feel like you’ve learned, right? Like, you’re, you’re, you’re just getting better at these questions. So I wonder if sometimes we are, uh, if we, we, might’ve just discovered like very different ways to do things and that the concerns of biology, which might include things like metabolic efficiency and so on, just completely ignored all these sorts of solutions, because they’re like, they’re not practical if you don’t have good training and testing data, and they’re not practical, if you can’t do post-hoc regularization and they’re not practical, if you can’t do blah, blah, blah.  </p>



<p>Erik H    01:34:07    Right. So I wonder if, you know, again, maybe what you really want for intelligence is something that’s just conscious. And once it’s conscious, it’ll kind of look and act like us and, uh, or not look like us obviously, but it’ll act like us. And, uh, you know, it just requires a total reframing, but, well,  </p>



<p>Kevin M    01:34:24    Let’s see. I think actually, just to, just to pick up one thing there on the energy efficiency thing, I think if anything is going to drive a move in AI towards a more biologically inspired, um, framing, I think it’s that it’s power consumption is going to be the limit of deep learning and, and, and the, you know, those kinds of, um, those kinds of methods. And there’s going to have to be a rethink of however, the hell it is that that brains managed to do this on, you know, whatever it’s 20 Watts or whatever, you know, something like that. So, um, I do think that there may be a radical rethink, um, which brings us closer to biological intelligence by virtue of having to be inspired by that biological design.  </p>



<p>Erik H    01:35:11    I, I certainly hope so because, you know, the fact that something like GPT three can spit out a poem, and I don’t think that it has consciousness or mind. I, I wouldn’t say it has any of those things, uh, is, is, is a horror to me. So like, as a writer, like, I literally think it’s hard. Um, and, uh, if you think about it enough, I think it becomes a horror. And, and so, you know, I, I, I I’d love for it to actually be conscious because then at least, you know, I’m competing against, you know, an actual agent rather than just this like weird statistical summation game, uh, that is totally mindless and yet can like, you know, put out good Shakespeare poetry.  </p>



<p>Paul M    01:35:51    So, Eric, uh, I mean, that, that’s a good note to end on because I don’t think GPT-3 actually has your vocabulary reading, reading The Revelations, like the, uh, just the, the, the descriptions. I mean, a lot of it is borderline poetry, uh, the way that you narrate and describe what’s happening. I mean, it’s really like just a pleasure to read so much of what you wrote in the book. So congratulations again, on the novel, Kevin, I can’t say that you write, uh, poetically in Innate. Maybe you are going to include some poetry. Is, is the new book about agency?  </p>



<p>Kevin M    01:36:24    The book is about, is about agency and I can’t wait, ultimately freewill, and I don’t think there’s going to be too much poetry in it, not for me anyway,  </p>



<p>Paul M    01:36:31    But anyway, it’s very enjoyable on my normal reading level, which is the nonfiction. And so I, I greatly, uh, just enjoy both of your works and research. Keep it up, guys. I really appreciate you being with me. Thanks. Thanks very much.  </p>



<p>Erik H    01:36:42    Thank you so much, Paul. It’s been, it’s been awesome catching up in, in somewhat in person, Kevin. Likewise.  </p>



<p>Kevin M    01:36:47    Thanks a lot.  </p>



<p>Paul M    01:36:54    Brain inspired is a production of me and you. I don’t do advertisements. You can support the show through Patreon for a trifling amount and get access to the full versions of all the episodes. Plus bonus episodes that focus more on the cultural side, but still have science go to braininspired.co and find the red Patreon button there to get in touch with me, email paul@braininspired.co. The music you hear is by the new year. Find them at thenewyear.net. Thank you for your support. See you next time.  </p>

</div></div>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/111-Kevin-Mitchell-and-Erik-Hoel-public.mp3" length="94450233"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Kevin Mitchell Pic Paul Sharp/SHARPPIX



Erik, Kevin, and I discuss… well a lot of things. 



Erik’s recent novel The Revelations is a story about a group of neuroscientists trying to develop a good theory of consciousness (with a murder mystery plot). 



Kevin’s book Innate – How the Wiring of Our Brains Shapes Who We Are describes the messy process of getting from DNA, traversing epigenetics and development, to our personalities. 



We talk about both books, then dive deeper into topics like whether brains evolved for moving our bodies vs. consciousness, how information theory is lending insights to emergent phenomena, and the role of agency with respect to what counts as intelligence.











Kevin’s website.Eriks’ website.Twitter: @WiringtheBrain (Kevin); @erikphoel (Erik)Books:INNATE – How the Wiring of Our Brains Shapes Who We AreThe RevelationsPapersErikFalsification and consciousness.The emergence of informative higher scales in complex networks.Emergence as the conversion of information: A unifying theory.



Timestamps



0:00 – Intro3:28 – The Revelations – Erik’s novel15:15 – Innate – Kevin’s book22:56 – Cycle of progress29:05 – Brains for movement or consciousness?46:46 – Freud’s influence59:18 – Theories of consciousness1:02:02 – Meaning and emergence1:05:50 – Reduction in neuroscience1:23:03 – Micro and macro – emergence1:29:35 – Agency and intelligence


Transcript

Eric, a fresh congratulations to you and Kevin, a stale, I suppose if two years to three years to innate was written in 2018, right. Or published, I don’t know when it was written and Eric a fresh congratulations to you. Although I know that the revelations, your book has been a long...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:38:04</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI NMA 03: Stochastic Processes Panel]]>
                </title>
                <pubDate>Thu, 22 Jul 2021 09:47:23 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-nma-03-stochastic-processes-panel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-nma-03-stochastic-processes-panel</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-nma-3-01.jpg" alt="" class="wp-image-1258" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Panelists:</p>



<ul><li><a href="https://nivlab.princeton.edu/" target="_blank" rel="noreferrer noopener">Yael Niv</a>.<ul><li><a href="https://twitter.com/yael_niv">@yael_niv</a></li></ul></li><li><a href="http://koerding.com/">Konrad Kording</a><ul><li><a href="https://twitter.com/KordingLab">@KordingLab</a>.</li><li>Previous BI episodes:<ul><li><a href="https://braininspired.co/podcast/27/">BI 027 Ioana Marinescu &amp; Konrad Kording: Causality in Quasi-Experiments</a>.</li><li><a href="https://braininspired.co/wp-admin/post.php?post=588&amp;action=edit">BI 014 Konrad Kording: Regulators, Mount Up!</a></li></ul></li></ul></li><li><a href="https://gershmanlab.com/index.html">Sam Gershman</a>.<ul><li><a href="https://twitter.com/gershbrain" target="_blank" rel="noreferrer noopener">@gershbrain</a>.</li><li>Previous BI episodes:<ul><li><a href="https://braininspired.co/wp-admin/post.php?post=1154&amp;action=edit">BI 095 Chris Summerfield and Sam Gershman: Neuro for AI?</a></li><li><a href="https://braininspired.co/wp-admin/post.php?post=652&amp;action=edit">BI 028 Sam Gershman: Free Energy Principle &amp; Human Machines</a>.</li></ul></li></ul></li><li><a href="https://www.ndcn.ox.ac.uk/team/timothy-behrens">Tim Behrens</a>.<ul><li><a href="https://twitter.com/behrenstimb">@behrenstim</a>.</li><li>Previous BI episodes:<ul><li><a href="https://braininspired.co/wp-admin/post.php?post=730&amp;action=edit">BI 035 Tim Behrens: Abstracting &amp; Generalizing Knowledge, &amp; Human Replay</a>.</li><li><a href="https://braininspired.co/wp-admin/post.php?post=630&amp;action=edit">BI 024 Tim Behrens: Cognitive Maps</a>.</li></ul></li></ul></li></ul>



<p>This is the third in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home">Neuromatch Academy</a>, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</p>



<p>The other panels: </p>



<ul><li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li><li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li><li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li><li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li><li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Panelists:



Yael Niv.@yael_nivKonrad Kording@KordingLab.Previous BI episodes:BI 027 Ioana Marinescu & Konrad Kording: Causality in Quasi-Experiments.BI 014 Konrad Kording: Regulators, Mount Up!Sam Gershman.@gershbrain.Previous BI episodes:BI 095 Chris Summerfield and Sam Gershman: Neuro for AI?BI 028 Sam Gershman: Free Energy Principle & Human Machines.Tim Behrens.@behrenstim.Previous BI episodes:BI 035 Tim Behrens: Abstracting & Generalizing Knowledge, & Human Replay.BI 024 Tim Behrens: Cognitive Maps.



This is the third in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.



The other panels: 



First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.Second panel, about linear systems, real neurons, and dynamic networks.Fourth panel, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.Fifth panel, about “doing more with fewer parameters: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).Sixth panel, about advanced topics in deep learning: unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI NMA 03: Stochastic Processes Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-nma-3-01.jpg" alt="" class="wp-image-1258" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Panelists:</p>



<ul><li><a href="https://nivlab.princeton.edu/" target="_blank" rel="noreferrer noopener">Yael Niv</a>.<ul><li><a href="https://twitter.com/yael_niv">@yael_niv</a></li></ul></li><li><a href="http://koerding.com/">Konrad Kording</a><ul><li><a href="https://twitter.com/KordingLab">@KordingLab</a>.</li><li>Previous BI episodes:<ul><li><a href="https://braininspired.co/podcast/27/">BI 027 Ioana Marinescu &amp; Konrad Kording: Causality in Quasi-Experiments</a>.</li><li><a href="https://braininspired.co/wp-admin/post.php?post=588&amp;action=edit">BI 014 Konrad Kording: Regulators, Mount Up!</a></li></ul></li></ul></li><li><a href="https://gershmanlab.com/index.html">Sam Gershman</a>.<ul><li><a href="https://twitter.com/gershbrain" target="_blank" rel="noreferrer noopener">@gershbrain</a>.</li><li>Previous BI episodes:<ul><li><a href="https://braininspired.co/wp-admin/post.php?post=1154&amp;action=edit">BI 095 Chris Summerfield and Sam Gershman: Neuro for AI?</a></li><li><a href="https://braininspired.co/wp-admin/post.php?post=652&amp;action=edit">BI 028 Sam Gershman: Free Energy Principle &amp; Human Machines</a>.</li></ul></li></ul></li><li><a href="https://www.ndcn.ox.ac.uk/team/timothy-behrens">Tim Behrens</a>.<ul><li><a href="https://twitter.com/behrenstimb">@behrenstim</a>.</li><li>Previous BI episodes:<ul><li><a href="https://braininspired.co/wp-admin/post.php?post=730&amp;action=edit">BI 035 Tim Behrens: Abstracting &amp; Generalizing Knowledge, &amp; Human Replay</a>.</li><li><a href="https://braininspired.co/wp-admin/post.php?post=630&amp;action=edit">BI 024 Tim Behrens: Cognitive Maps</a>.</li></ul></li></ul></li></ul>



<p>This is the third in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home">Neuromatch Academy</a>, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</p>



<p>The other panels: </p>



<ul><li><a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li><li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li><li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li><li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li><li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/NMA-03-Normativity.mp3" length="58663415"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Panelists:



Yael Niv.@yael_nivKonrad Kording@KordingLab.Previous BI episodes:BI 027 Ioana Marinescu & Konrad Kording: Causality in Quasi-Experiments.BI 014 Konrad Kording: Regulators, Mount Up!Sam Gershman.@gershbrain.Previous BI episodes:BI 095 Chris Summerfield and Sam Gershman: Neuro for AI?BI 028 Sam Gershman: Free Energy Principle & Human Machines.Tim Behrens.@behrenstim.Previous BI episodes:BI 035 Tim Behrens: Abstracting & Generalizing Knowledge, & Human Replay.BI 024 Tim Behrens: Cognitive Maps.



This is the third in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.



The other panels: 



First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.Second panel, about linear systems, real neurons, and dynamic networks.Fourth panel, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.Fifth panel, about “doing more with fewer parameters: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).Sixth panel, about advanced topics in deep learning: unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:00:48</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI NMA 02: Dynamical Systems Panel]]>
                </title>
                <pubDate>Thu, 15 Jul 2021 08:36:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-nma-02-dynamical-systems-panel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-nma-02-dynamical-systems-panel</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-nma-2-01.jpg" alt="" class="wp-image-1255" />



<p>Panelists:</p>



<ul><li><a href="https://fairhalllab.com/">Adrienne Fairhall</a>.<ul><li><a href="https://twitter.com/alfairhall">@alfairhall</a>.</li></ul></li><li><a href="http://bingbrunton.com">Bing Brunton</a>.<ul><li><a href="https://twitter.com/bingbrunton">@bingbrunton</a>.</li></ul></li><li><a href="https://www.rajanlab.com/">Kanaka Rajan</a>.<ul><li><a href="https://twitter.com/rajankdr?lang=en">@rajankdr</a>.</li><li><a href="https://braininspired.co/podcast/54/">BI 054 Kanaka Rajan: How Do We Switch Behaviors?</a></li></ul></li></ul>



<p>This is the second in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home">Neuromatch Academy</a>, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with linear systems, real neurons, and dynamic networks. </p>



<p>Other panels:</p>



<ul><li> <a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li><li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li><li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Panelists:



Adrienne Fairhall.@alfairhall.Bing Brunton.@bingbrunton.Kanaka Rajan.@rajankdr.BI 054 Kanaka Rajan: How Do We Switch Behaviors?



This is the second in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with linear systems, real neurons, and dynamic networks. 



Other panels:



 First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.Fourth panel, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI NMA 02: Dynamical Systems Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-nma-2-01.jpg" alt="" class="wp-image-1255" />



<p>Panelists:</p>



<ul><li><a href="https://fairhalllab.com/">Adrienne Fairhall</a>.<ul><li><a href="https://twitter.com/alfairhall">@alfairhall</a>.</li></ul></li><li><a href="http://bingbrunton.com">Bing Brunton</a>.<ul><li><a href="https://twitter.com/bingbrunton">@bingbrunton</a>.</li></ul></li><li><a href="https://www.rajanlab.com/">Kanaka Rajan</a>.<ul><li><a href="https://twitter.com/rajankdr?lang=en">@rajankdr</a>.</li><li><a href="https://braininspired.co/podcast/54/">BI 054 Kanaka Rajan: How Do We Switch Behaviors?</a></li></ul></li></ul>



<p>This is the second in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home">Neuromatch Academy</a>, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with linear systems, real neurons, and dynamic networks. </p>



<p>Other panels:</p>



<ul><li> <a href="https://braininspired.co/podcast/nma-1/">First panel</a>, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</li><li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li><li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/NMA-2-Dynamical-Systems-Panel.mp3" length="72755739"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Panelists:



Adrienne Fairhall.@alfairhall.Bing Brunton.@bingbrunton.Kanaka Rajan.@rajankdr.BI 054 Kanaka Rajan: How Do We Switch Behaviors?



This is the second in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with linear systems, real neurons, and dynamic networks. 



Other panels:



 First panel, about model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.Fourth panel, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:15:28</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI NMA 01: Machine Learning Panel]]>
                </title>
                <pubDate>Mon, 12 Jul 2021 05:48:13 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-nma-01-machine-learning-panel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-nma-01-machine-learning-panel</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-nma-1-01.jpg" alt="" class="wp-image-1253" />



<p>Panelists:</p>



<ul><li><a href="https://www.lim.bio/">Athena Akrami</a>: <a href="https://twitter.com/AthenaAkrami">@AthenaAkrami</a>.</li><li><a href="https://www.seas.harvard.edu/person/demba-ba">Demba Ba</a>.</li><li><a href="http://compneurosci.com/">Gunnar Blohm</a>: <a href="https://twitter.com/GunnarBlohm">@GunnarBlohm</a>.</li><li><a href="https://www.psy.pku.edu.cn/english/people/faculty/professor/kunlinwei/index.htm">Kunlin Wei</a>.</li></ul>



<p>This is the first in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home" target="_blank" rel="noreferrer noopener">Neuromatch Academy</a>, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</p>



<p>Other panels:</p>



<ul><li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li><li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li><li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li><li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li><li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




Panelists:



Athena Akrami: @AthenaAkrami.Demba Ba.Gunnar Blohm: @GunnarBlohm.Kunlin Wei.



This is the first in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.



Other panels:



Second panel, about linear systems, real neurons, and dynamic networks.Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.Fourth panel, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.Fifth panel, about “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).Sixth panel, about advanced topics in deep learning: unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI NMA 01: Machine Learning Panel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-nma-1-01.jpg" alt="" class="wp-image-1253" />



<p>Panelists:</p>



<ul><li><a href="https://www.lim.bio/">Athena Akrami</a>: <a href="https://twitter.com/AthenaAkrami">@AthenaAkrami</a>.</li><li><a href="https://www.seas.harvard.edu/person/demba-ba">Demba Ba</a>.</li><li><a href="http://compneurosci.com/">Gunnar Blohm</a>: <a href="https://twitter.com/GunnarBlohm">@GunnarBlohm</a>.</li><li><a href="https://www.psy.pku.edu.cn/english/people/faculty/professor/kunlinwei/index.htm">Kunlin Wei</a>.</li></ul>



<p>This is the first in a series of panel discussions in collaboration with <a href="https://academy.neuromatch.io/home" target="_blank" rel="noreferrer noopener">Neuromatch Academy</a>, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.</p>



<p>Other panels:</p>



<ul><li><a href="https://braininspired.co/podcast/nma-2/">Second panel</a>, about linear systems, real neurons, and dynamic networks.</li><li><a href="https://braininspired.co/podcast/nma-3/">Third panel</a>, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.</li><li><a href="https://braininspired.co/podcast/nma-4/">Fourth panel</a>, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, &amp; regularization.</li><li><a href="https://braininspired.co/podcast/nma-5/">Fifth panel</a>, about “doing more with fewer parameters”: Convnets, RNNs, attention &amp; transformers, generative models (VAEs &amp; GANs).</li><li><a href="https://braininspired.co/podcast/nma-6/">Sixth panel</a>, about advanced topics in deep learning: unsupervised &amp; self-supervised learning, reinforcement learning, continual learning/causality.</li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/BI-NMA-01-Machine-Learning-Panel.mp3" length="84017229"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




Panelists:



Athena Akrami: @AthenaAkrami.Demba Ba.Gunnar Blohm: @GunnarBlohm.Kunlin Wei.



This is the first in a series of panel discussions in collaboration with Neuromatch Academy, the online computational neuroscience summer school. In this episode, the panelists discuss their experiences with model fitting, GLMs/machine learning, dimensionality reduction, and deep learning.



Other panels:



Second panel, about linear systems, real neurons, and dynamic networks.Third panel, about stochastic processes, including Bayes, decision-making, optimal control, reinforcement learning, and causality.Fourth panel, about basics in deep learning, including Linear deep learning, Pytorch, multi-layer-perceptrons, optimization, & regularization.Fifth panel, about “doing more with fewer parameters”: Convnets, RNNs, attention & transformers, generative models (VAEs & GANs).Sixth panel, about advanced topics in deep learning: unsupervised & self-supervised learning, reinforcement learning, continual learning/causality.
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 110 Catherine Stinson and Jessica Thompson: Neuro-AI Explanation]]>
                </title>
                <pubDate>Tue, 06 Jul 2021 14:38:41 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-110-catherine-stinson-and-jessica-thompson-neuro-ai-explanation</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-110-catherine-stinson-and-jessica-thompson-neuro-ai-explanation</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-stinson-thompson-01.jpg" alt="" class="wp-image-1247" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Catherine, Jess, and I use some of the ideas from their recent papers to discuss how different types of explanations in neuroscience and AI could be unified into explanations of intelligence, natural or artificial. Catherine has written about how models are related to the target system they are built to explain. She suggests both the model and the target system should be considered as instantiations of a specific kind of phenomenon, and explanation is a product of relating the model and the target system to that specific aspect they both share. Jess has suggested we shift our focus of explanation from objects – like a brain area or a deep learning model – to the shared class of phenomenon performed by those objects. Doing so may help bridge the gap between the different forms of explanation currently used in neuroscience and AI. We also discuss Henk de Regt’s conception of scientific understanding and its relation to explanation (they’re different!), and plenty more.</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="250" height="270" src="https://braininspired.co/wp-content/uploads/2021/07/download-1.jpeg" alt="" class="wp-image-1250" /></li><li class="blocks-gallery-item"><img width="250" height="300" src="https://braininspired.co/wp-content/uploads/2021/07/headshot2020-1.jpg" alt="" class="wp-image-1251" /></li></ul>



<ul><li><a href="https://www.catherinestinson.ca/">Catherine’s website</a>.</li><li><a href="https://thompsonj.github.io/discussion-excerpt">Jessica’s blog</a>.</li><li>Twitter: Jess: <a href="https://twitter.com/tsonj">@tsonj</a>.</li><li>Related papers<ul><li><a href="https://www.catherinestinson.ca/Files/Papers/Artificial_Neurons_preprint.pdf">From Implausible Artificial Neurons to Idealized Cognitive Models: Rebooting Philosophy of Artificial Intelligence</a> – Catherine</li><li><a href="https://psyarxiv.com/5g3pn">Forms of explanation and understanding for neuroscience and artificial intelligence</a> – Jess</li></ul></li><li>Jess is a postdoc in Chris Summerfield’s lab, and <a href="https://braininspired.co/podcast/95/">Chris and San Gershman were on a recent episode</a>.</li><li><a href="https://www.amazon.com/gp/product/0197510264/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0197510264&amp;linkId=39d73128189707346c55b6f16d794aad">Understanding Scientific Understanding</a> by Henk de Regt.</li></ul>



<p>Timestamps:<br />0:00 – Intro<br />11:11 – Background and approaches<br />27:00 – Understanding distinct from explanation<br />36:00 – Explanations as programs (early explanation)<br />40:42 – Explaining classes of phenomena<br />52:05 – Constitutive (neuro) vs. etiological (AI) explanations<br />1:04:04 – Do nonphysical objects count for explanation?<br />1:10:51 – Advice for early philosopher/scientists</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Catherine, Jess, and I use some of the ideas from their recent papers to discuss how different types of explanations in neuroscience and AI could be unified into explanations of intelligence, natural or artificial. Catherine has written about how models are related to the target system they are built to explain. She suggests both the model and the target system should be considered as instantiations of a specific kind of phenomenon, and explanation is a product of relating the model and the target system to that specific aspect they both share. Jess has suggested we shift our focus of explanation from objects – like a brain area or a deep learning model – to the shared class of phenomenon performed by those objects. Doing so may help bridge the gap between the different forms of explanation currently used in neuroscience and AI. We also discuss Henk de Regt’s conception of scientific understanding and its relation to explanation (they’re different!), and plenty more.







Catherine’s website.Jessica’s blog.Twitter: Jess: @tsonj.Related papersFrom Implausible Artificial Neurons to Idealized Cognitive Models: Rebooting Philosophy of Artificial Intelligence – CatherineForms of explanation and understanding for neuroscience and artificial intelligence – JessJess is a postdoc in Chris Summerfield’s lab, and Chris and San Gershman were on a recent episode.Understanding Scientific Understanding by Henk de Regt.



Timestamps:0:00 – Intro11:11 – Background and approaches27:00 – Understanding distinct from explanation36:00 – Explanations as programs (early explanation)40:42 – Explaining classes of phenomena52:05 – Constitutive (neuro) vs. etiological (AI) explanations1:04:04 – Do nonphysical objects count for explanation?1:10:51 – Advice for early philosopher/scientists
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 110 Catherine Stinson and Jessica Thompson: Neuro-AI Explanation]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/07/art-stinson-thompson-01.jpg" alt="" class="wp-image-1247" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Catherine, Jess, and I use some of the ideas from their recent papers to discuss how different types of explanations in neuroscience and AI could be unified into explanations of intelligence, natural or artificial. Catherine has written about how models are related to the target system they are built to explain. She suggests both the model and the target system should be considered as instantiations of a specific kind of phenomenon, and explanation is a product of relating the model and the target system to that specific aspect they both share. Jess has suggested we shift our focus of explanation from objects – like a brain area or a deep learning model – to the shared class of phenomenon performed by those objects. Doing so may help bridge the gap between the different forms of explanation currently used in neuroscience and AI. We also discuss Henk de Regt’s conception of scientific understanding and its relation to explanation (they’re different!), and plenty more.</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="250" height="270" src="https://braininspired.co/wp-content/uploads/2021/07/download-1.jpeg" alt="" class="wp-image-1250" /></li><li class="blocks-gallery-item"><img width="250" height="300" src="https://braininspired.co/wp-content/uploads/2021/07/headshot2020-1.jpg" alt="" class="wp-image-1251" /></li></ul>



<ul><li><a href="https://www.catherinestinson.ca/">Catherine’s website</a>.</li><li><a href="https://thompsonj.github.io/discussion-excerpt">Jessica’s blog</a>.</li><li>Twitter: Jess: <a href="https://twitter.com/tsonj">@tsonj</a>.</li><li>Related papers<ul><li><a href="https://www.catherinestinson.ca/Files/Papers/Artificial_Neurons_preprint.pdf">From Implausible Artificial Neurons to Idealized Cognitive Models: Rebooting Philosophy of Artificial Intelligence</a> – Catherine</li><li><a href="https://psyarxiv.com/5g3pn">Forms of explanation and understanding for neuroscience and artificial intelligence</a> – Jess</li></ul></li><li>Jess is a postdoc in Chris Summerfield’s lab, and <a href="https://braininspired.co/podcast/95/">Chris and San Gershman were on a recent episode</a>.</li><li><a href="https://www.amazon.com/gp/product/0197510264/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0197510264&amp;linkId=39d73128189707346c55b6f16d794aad">Understanding Scientific Understanding</a> by Henk de Regt.</li></ul>



<p>Timestamps:<br />0:00 – Intro<br />11:11 – Background and approaches<br />27:00 – Understanding distinct from explanation<br />36:00 – Explanations as programs (early explanation)<br />40:42 – Explaining classes of phenomena<br />52:05 – Constitutive (neuro) vs. etiological (AI) explanations<br />1:04:04 – Do nonphysical objects count for explanation?<br />1:10:51 – Advice for early philosopher/scientists</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/110-Catherine-Stinson-Jessica-Thompson-public.mp3" length="81930159"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Catherine, Jess, and I use some of the ideas from their recent papers to discuss how different types of explanations in neuroscience and AI could be unified into explanations of intelligence, natural or artificial. Catherine has written about how models are related to the target system they are built to explain. She suggests both the model and the target system should be considered as instantiations of a specific kind of phenomenon, and explanation is a product of relating the model and the target system to that specific aspect they both share. Jess has suggested we shift our focus of explanation from objects – like a brain area or a deep learning model – to the shared class of phenomenon performed by those objects. Doing so may help bridge the gap between the different forms of explanation currently used in neuroscience and AI. We also discuss Henk de Regt’s conception of scientific understanding and its relation to explanation (they’re different!), and plenty more.







Catherine’s website.Jessica’s blog.Twitter: Jess: @tsonj.Related papersFrom Implausible Artificial Neurons to Idealized Cognitive Models: Rebooting Philosophy of Artificial Intelligence – CatherineForms of explanation and understanding for neuroscience and artificial intelligence – JessJess is a postdoc in Chris Summerfield’s lab, and Chris and San Gershman were on a recent episode.Understanding Scientific Understanding by Henk de Regt.



Timestamps:0:00 – Intro11:11 – Background and approaches27:00 – Understanding distinct from explanation36:00 – Explanations as programs (early explanation)40:42 – Explaining classes of phenomena52:05 – Constitutive (neuro) vs. etiological (AI) explanations1:04:04 – Do nonphysical objects count for explanation?1:10:51 – Advice for early philosopher/scientists
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:25:02</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 109 Mark Bickhard: Interactivism]]>
                </title>
                <pubDate>Sat, 26 Jun 2021 10:57:31 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-109-mark-bickhard-interactivism</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-109-mark-bickhard-interactivism</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/06/art-bickhard-01.jpg" alt="" class="wp-image-1244" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="300" height="300" src="https://braininspired.co/wp-content/uploads/2021/06/headshot.jpeg" alt="" class="wp-image-1245" /></div>



<p>Mark and I discuss a wide range of topics surrounding his Interactivism framework for explaining cognition. Interactivism stems from Mark’s account of representations and how what we represent in our minds is related to the external world – a challenge that has plagued the mind-body problem since the beginning. Basically, representations are anticipated interactions with the world, that can be true (if enacting one helps an organism maintain its thermodynamic relation with the world) or false (if it doesn’t). And representations are functional, in that they function to maintain far from equilibrium thermodynamics for the organism for self-maintenance. Over the years, Mark has filled out Interactivism, starting with a process metaphysics foundation and building from there to account for representations, how our brains might implement representations, and why AI is hindered by our modern “encoding” version of representation. We also compare interactivism to other similar frameworks, like enactivism, predictive processing, and the free energy principle.</p>



<p><br />For related discussions on the foundations (and issues of) representations, check out <a href="https://braininspired.co/podcast/60/">episode 60 with Michael Rescorla</a>, <a href="https://braininspired.co/podcast/61/">episode 61 with Jörn Diedrichsen and Niko Kriegeskorte</a>, and especially <a href="https://braininspired.co/podcast/79/">episode 79 with Romain Brette</a>.</p>



<ul><li><a href="https://www.lehigh.edu/~mhb0/mhb0.html">Mark’s website</a>.</li><li>Related papers<ul><li><a href="http://www.lehigh.edu/~mhb0/InteractivismManifesto.pdf">Interactivism: A manifesto</a>.</li><li>Plenty of <a href="https://www.lehigh.edu/~mhb0/pubspage.html">other papers available</a> via his website.</li></ul></li><li>Also mentioned:<ul><li><a href="https://www.amazon.com/gp/product/B01A0BGRJ6/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B01A0BGRJ6&amp;linkId=c9e5de9ee873e3d3c12adca0d484b0e6">The First Half Second The Microgenesis and Temporal Dynamics of Unconscious and Conscious Visual Processes</a>. 2006, Haluk Ögmen, Bruno G. Breitmeyer</li><li><a href="https://www.urmc.rochester.edu/labs/nedergaard.aspx">Maiken Nedergaard</a>‘s <a href="https://neuroscience.stanford.edu/videos/nightlife-brain">work on sleep</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />5:06 – Previous and upcoming book<br />9:17 – Origins of Mark’s thinking<br />14:31 – Process vs. substance metaphysics<br />27:10 – Kinds of emergence<br />32:16 – Normative emergence to normative function and representation<br />36:33 – Representation in Interactivism<br />46:07 – Situation knowledge<br />54:02 – Interactivism vs. Enactivism<br />1:09:37 – Interactivism vs Predictive/Bayesian brain<br />1:17:39 – Interactivism vs. Free energy principle<br />1:21:56 – Microgenesis<br />1:33:11 – Implications for neuroscience<br />1:38:18 – Learning as variation and selection<br />1:45:07 – Implications for AI<br />1:55:06 – Everything is a clock<br />1:58:14 – Is Mark a philosopher?</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Mark and I discuss a wide range of topics surrounding his Interactivism framework for explaining cognition. Interactivism stems from Mark’s account of representations and how what we represent in our minds is related to the external world – a challenge that has plagued the mind-body problem since the beginning. Basically, representations are anticipated interactions with the world, that can be true (if enacting one helps an organism maintain its thermodynamic relation with the world) or false (if it doesn’t). And representations are functional, in that they function to maintain far from equilibrium thermodynamics for the organism for self-maintenance. Over the years, Mark has filled out Interactivism, starting with a process metaphysics foundation and building from there to account for representations, how our brains might implement representations, and why AI is hindered by our modern “encoding” version of representation. We also compare interactivism to other similar frameworks, like enactivism, predictive processing, and the free energy principle.



For related discussions on the foundations (and issues of) representations, check out episode 60 with Michael Rescorla, episode 61 with Jörn Diedrichsen and Niko Kriegeskorte, and especially episode 79 with Romain Brette.



Mark’s website.Related papersInteractivism: A manifesto.Plenty of other papers available via his website.Also mentioned:The First Half Second The Microgenesis and Temporal Dynamics of Unconscious and Conscious Visual Processes. 2006, Haluk Ögmen, Bruno G. BreitmeyerMaiken Nedergaard‘s work on sleep.



Timestamps0:00 – Intro5:06 – Previous and upcoming book9:17 – Origins of Mark’s thinking14:31 – Process vs. substance metaphysics27:10 – Kinds of emergence32:16 – Normative emergence to normative function and representation36:33 – Representation in Interactivism46:07 – Situation knowledge54:02 – Interactivism vs. Enactivism1:09:37 – Interactivism vs Predictive/Bayesian brain1:17:39 – Interactivism vs. Free energy principle1:21:56 – Microgenesis1:33:11 – Implications for neuroscience1:38:18 – Learning as variation and selection1:45:07 – Implications for AI1:55:06 – Everything is a clock1:58:14 – Is Mark a philosopher?
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 109 Mark Bickhard: Interactivism]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/06/art-bickhard-01.jpg" alt="" class="wp-image-1244" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="300" height="300" src="https://braininspired.co/wp-content/uploads/2021/06/headshot.jpeg" alt="" class="wp-image-1245" /></div>



<p>Mark and I discuss a wide range of topics surrounding his Interactivism framework for explaining cognition. Interactivism stems from Mark’s account of representations and how what we represent in our minds is related to the external world – a challenge that has plagued the mind-body problem since the beginning. Basically, representations are anticipated interactions with the world, that can be true (if enacting one helps an organism maintain its thermodynamic relation with the world) or false (if it doesn’t). And representations are functional, in that they function to maintain far from equilibrium thermodynamics for the organism for self-maintenance. Over the years, Mark has filled out Interactivism, starting with a process metaphysics foundation and building from there to account for representations, how our brains might implement representations, and why AI is hindered by our modern “encoding” version of representation. We also compare interactivism to other similar frameworks, like enactivism, predictive processing, and the free energy principle.</p>



<p><br />For related discussions on the foundations (and issues of) representations, check out <a href="https://braininspired.co/podcast/60/">episode 60 with Michael Rescorla</a>, <a href="https://braininspired.co/podcast/61/">episode 61 with Jörn Diedrichsen and Niko Kriegeskorte</a>, and especially <a href="https://braininspired.co/podcast/79/">episode 79 with Romain Brette</a>.</p>



<ul><li><a href="https://www.lehigh.edu/~mhb0/mhb0.html">Mark’s website</a>.</li><li>Related papers<ul><li><a href="http://www.lehigh.edu/~mhb0/InteractivismManifesto.pdf">Interactivism: A manifesto</a>.</li><li>Plenty of <a href="https://www.lehigh.edu/~mhb0/pubspage.html">other papers available</a> via his website.</li></ul></li><li>Also mentioned:<ul><li><a href="https://www.amazon.com/gp/product/B01A0BGRJ6/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B01A0BGRJ6&amp;linkId=c9e5de9ee873e3d3c12adca0d484b0e6">The First Half Second The Microgenesis and Temporal Dynamics of Unconscious and Conscious Visual Processes</a>. 2006, Haluk Ögmen, Bruno G. Breitmeyer</li><li><a href="https://www.urmc.rochester.edu/labs/nedergaard.aspx">Maiken Nedergaard</a>‘s <a href="https://neuroscience.stanford.edu/videos/nightlife-brain">work on sleep</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />5:06 – Previous and upcoming book<br />9:17 – Origins of Mark’s thinking<br />14:31 – Process vs. substance metaphysics<br />27:10 – Kinds of emergence<br />32:16 – Normative emergence to normative function and representation<br />36:33 – Representation in Interactivism<br />46:07 – Situation knowledge<br />54:02 – Interactivism vs. Enactivism<br />1:09:37 – Interactivism vs Predictive/Bayesian brain<br />1:17:39 – Interactivism vs. Free energy principle<br />1:21:56 – Microgenesis<br />1:33:11 – Implications for neuroscience<br />1:38:18 – Learning as variation and selection<br />1:45:07 – Implications for AI<br />1:55:06 – Everything is a clock<br />1:58:14 – Is Mark a philosopher?</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/109-Mark-Bickhard-public.mp3" length="119077909"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Mark and I discuss a wide range of topics surrounding his Interactivism framework for explaining cognition. Interactivism stems from Mark’s account of representations and how what we represent in our minds is related to the external world – a challenge that has plagued the mind-body problem since the beginning. Basically, representations are anticipated interactions with the world, that can be true (if enacting one helps an organism maintain its thermodynamic relation with the world) or false (if it doesn’t). And representations are functional, in that they function to maintain far from equilibrium thermodynamics for the organism for self-maintenance. Over the years, Mark has filled out Interactivism, starting with a process metaphysics foundation and building from there to account for representations, how our brains might implement representations, and why AI is hindered by our modern “encoding” version of representation. We also compare interactivism to other similar frameworks, like enactivism, predictive processing, and the free energy principle.



For related discussions on the foundations (and issues of) representations, check out episode 60 with Michael Rescorla, episode 61 with Jörn Diedrichsen and Niko Kriegeskorte, and especially episode 79 with Romain Brette.



Mark’s website.Related papersInteractivism: A manifesto.Plenty of other papers available via his website.Also mentioned:The First Half Second The Microgenesis and Temporal Dynamics of Unconscious and Conscious Visual Processes. 2006, Haluk Ögmen, Bruno G. BreitmeyerMaiken Nedergaard‘s work on sleep.



Timestamps0:00 – Intro5:06 – Previous and upcoming book9:17 – Origins of Mark’s thinking14:31 – Process vs. substance metaphysics27:10 – Kinds of emergence32:16 – Normative emergence to normative function and representation36:33 – Representation in Interactivism46:07 – Situation knowledge54:02 – Interactivism vs. Enactivism1:09:37 – Interactivism vs Predictive/Bayesian brain1:17:39 – Interactivism vs. Free energy principle1:21:56 – Microgenesis1:33:11 – Implications for neuroscience1:38:18 – Learning as variation and selection1:45:07 – Implications for AI1:55:06 – Everything is a clock1:58:14 – Is Mark a philosopher?
]]>
                </itunes:summary>
                                                                            <itunes:duration>02:03:43</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 108 Grace Lindsay: Models of the Mind]]>
                </title>
                <pubDate>Wed, 16 Jun 2021 13:56:49 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-108-grace-lindsay-models-of-the-mind</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-108-grace-lindsay-models-of-the-mind</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/06/art-lindsay-01.jpg" alt="" class="wp-image-1241" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/09/headshot_lindsay.jpg" alt="" class="wp-image-569" width="225" height="305" /></div>



<ul><li><a href="https://gracewlindsay.com/">Grace’s website</a></li><li>Twitter: <a href="https://twitter.com/neurograce">@neurograce</a>.</li><li><a href="https://www.amazon.com/gp/product/1472966422/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1472966422&amp;linkId=cf554d0eccb5e8485b7e414c571f1ec7">Models of the Mind: How Physics, Engineering and Mathematics Have Shaped Our Understanding of the Brain</a>.</li><li>We talked about Grace’s work using convolutional neural networks to study vision and attention <a href="https://braininspired.co/podcast/11/">way back on episode 11</a>.</li></ul>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1472966422/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1472966422&amp;linkId=cf554d0eccb5e8485b7e414c571f1ec7" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/06/bookthumb.jpg" alt="" class="wp-image-1242" width="156" height="250" /></a></div>



<p>Grace and I discuss her new book <a href="https://www.amazon.com/gp/product/1472966422/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1472966422&amp;linkId=cf554d0eccb5e8485b7e414c571f1ec7">Models of the Mind</a>, about the blossoming and conceptual foundations of the computational approach to study minds and brains. Each chapter of the book focuses on one major topic and provides historical context, the major concepts that connect models to brain functions, and the current landscape of related research endeavors. We cover a handful of those during the episode, including the birth of AI, the difference between math in physics and neuroscience, determining the neural code and how Shannon information theory plays a role, whether it’s possible to guess a brain function based on what we know about some brain structure, “grand unified theories” of the brain. We also digress and explore topics beyond the book. </p>



<p>Timestamps<br />0:00 – Intro<br />4:19 – Cognition beyond vision<br />12:38 – Models of the Mind – book overview<br />14:00 – The good and bad of using math<br />21:33 – I quiz Grace on her own book<br />25:03 – Birth of AI and computational approach<br />38:00 – Rediscovering old math for new neuroscience<br />41:00 – Topology as good math to know now<br />45:29 – Physics vs. neuroscience math<br />49:32 – Neural code and information theory<br />55:03 – Rate code vs. timing code<br />59:18 – Graph theory – can you deduce function from structure?<br />1:06:56 – Multiple realizability<br />1:13:01 – Grand Unified theories of the brain</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Grace’s websiteTwitter: @neurograce.Models of the Mind: How Physics, Engineering and Mathematics Have Shaped Our Understanding of the Brain.We talked about Grace’s work using convolutional neural networks to study vision and attention way back on episode 11.







Grace and I discuss her new book Models of the Mind, about the blossoming and conceptual foundations of the computational approach to study minds and brains. Each chapter of the book focuses on one major topic and provides historical context, the major concepts that connect models to brain functions, and the current landscape of related research endeavors. We cover a handful of those during the episode, including the birth of AI, the difference between math in physics and neuroscience, determining the neural code and how Shannon information theory plays a role, whether it’s possible to guess a brain function based on what we know about some brain structure, “grand unified theories” of the brain. We also digress and explore topics beyond the book. 



Timestamps0:00 – Intro4:19 – Cognition beyond vision12:38 – Models of the Mind – book overview14:00 – The good and bad of using math21:33 – I quiz Grace on her own book25:03 – Birth of AI and computational approach38:00 – Rediscovering old math for new neuroscience41:00 – Topology as good math to know now45:29 – Physics vs. neuroscience math49:32 – Neural code and information theory55:03 – Rate code vs. timing code59:18 – Graph theory – can you deduce function from structure?1:06:56 – Multiple realizability1:13:01 – Grand Unified theories of the brain
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 108 Grace Lindsay: Models of the Mind]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/06/art-lindsay-01.jpg" alt="" class="wp-image-1241" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/09/headshot_lindsay.jpg" alt="" class="wp-image-569" width="225" height="305" /></div>



<ul><li><a href="https://gracewlindsay.com/">Grace’s website</a></li><li>Twitter: <a href="https://twitter.com/neurograce">@neurograce</a>.</li><li><a href="https://www.amazon.com/gp/product/1472966422/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1472966422&amp;linkId=cf554d0eccb5e8485b7e414c571f1ec7">Models of the Mind: How Physics, Engineering and Mathematics Have Shaped Our Understanding of the Brain</a>.</li><li>We talked about Grace’s work using convolutional neural networks to study vision and attention <a href="https://braininspired.co/podcast/11/">way back on episode 11</a>.</li></ul>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1472966422/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1472966422&amp;linkId=cf554d0eccb5e8485b7e414c571f1ec7" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/06/bookthumb.jpg" alt="" class="wp-image-1242" width="156" height="250" /></a></div>



<p>Grace and I discuss her new book <a href="https://www.amazon.com/gp/product/1472966422/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1472966422&amp;linkId=cf554d0eccb5e8485b7e414c571f1ec7">Models of the Mind</a>, about the blossoming and conceptual foundations of the computational approach to study minds and brains. Each chapter of the book focuses on one major topic and provides historical context, the major concepts that connect models to brain functions, and the current landscape of related research endeavors. We cover a handful of those during the episode, including the birth of AI, the difference between math in physics and neuroscience, determining the neural code and how Shannon information theory plays a role, whether it’s possible to guess a brain function based on what we know about some brain structure, “grand unified theories” of the brain. We also digress and explore topics beyond the book. </p>



<p>Timestamps<br />0:00 – Intro<br />4:19 – Cognition beyond vision<br />12:38 – Models of the Mind – book overview<br />14:00 – The good and bad of using math<br />21:33 – I quiz Grace on her own book<br />25:03 – Birth of AI and computational approach<br />38:00 – Rediscovering old math for new neuroscience<br />41:00 – Topology as good math to know now<br />45:29 – Physics vs. neuroscience math<br />49:32 – Neural code and information theory<br />55:03 – Rate code vs. timing code<br />59:18 – Graph theory – can you deduce function from structure?<br />1:06:56 – Multiple realizability<br />1:13:01 – Grand Unified theories of the brain</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/108-Grace-Lindsay-public.mp3" length="83062339"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Grace’s websiteTwitter: @neurograce.Models of the Mind: How Physics, Engineering and Mathematics Have Shaped Our Understanding of the Brain.We talked about Grace’s work using convolutional neural networks to study vision and attention way back on episode 11.







Grace and I discuss her new book Models of the Mind, about the blossoming and conceptual foundations of the computational approach to study minds and brains. Each chapter of the book focuses on one major topic and provides historical context, the major concepts that connect models to brain functions, and the current landscape of related research endeavors. We cover a handful of those during the episode, including the birth of AI, the difference between math in physics and neuroscience, determining the neural code and how Shannon information theory plays a role, whether it’s possible to guess a brain function based on what we know about some brain structure, “grand unified theories” of the brain. We also digress and explore topics beyond the book. 



Timestamps0:00 – Intro4:19 – Cognition beyond vision12:38 – Models of the Mind – book overview14:00 – The good and bad of using math21:33 – I quiz Grace on her own book25:03 – Birth of AI and computational approach38:00 – Rediscovering old math for new neuroscience41:00 – Topology as good math to know now45:29 – Physics vs. neuroscience math49:32 – Neural code and information theory55:03 – Rate code vs. timing code59:18 – Graph theory – can you deduce function from structure?1:06:56 – Multiple realizability1:13:01 – Grand Unified theories of the brain
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:26:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 107 Steve Fleming: Know Thyself]]>
                </title>
                <pubDate>Sun, 06 Jun 2021 12:38:31 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-107-steve-fleming-know-thyself</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-107-steve-fleming-know-thyself</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/06/art-fleming-01.jpg" alt="" class="wp-image-1238" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/02/headnewsteve_fleming.jpg" alt="" class="wp-image-1177" width="192" height="240" /></div>



<p>Steve and I discuss many topics from his new book <a href="https://www.amazon.com/gp/product/1541672844/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541672844&amp;linkId=087d7f9a45c4b8ac8166ec2d946236e3">Know Thyself: The Science of Self-Awareness</a>. The book covers the full range of what we know about metacognition and self-awareness, including how brains might underlie metacognitive behavior, computational models to explain mechanisms of metacognition, how and why self-awareness evolved, which animals beyond humans harbor metacognition and how to test it, its role and potential origins in theory of mind and social interaction, how our metacognitive skills develop over our lifetimes, what our metacognitive skill tells us about our other psychological traits, and so on. We also discuss what it might look like when we are able to build metacognitive AI, and whether that’s even a good idea.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/06/bookImage.jpg" alt="" class="wp-image-1239" width="162" height="250" /></div>



<ul><li>Steve’s lab: <a href="https://metacoglab.org/">The MetaLab</a>.</li><li>Twitter: <a href="https://twitter.com/smfleming">@smfleming</a>.</li><li><a href="https://braininspired.co/podcast/99/">Steve and Hakwan Lau on episode 99 about consciousness</a>. </li><li>Papers:<ul><li>Metacognitive training: <a href="https://static1.squarespace.com/static/5616b377e4b0670f148ff742/t/5d0f80760ff3ce00011762f1/1561297019002/CarpenterJEPG2019.pdf">Domain-General Enhancements of Metacognitive Ability Through Adaptive Training</a></li></ul></li><li>The book:<ul><li><a href="https://www.amazon.com/gp/product/1541672844/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541672844&amp;linkId=087d7f9a45c4b8ac8166ec2d946236e3">Know Thyself: The Science of Self-Awareness</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />3:25 – Steve’s Career<br />10:43 – Sub-personal vs. personal metacognition<br />17:55 – Meditation and metacognition<br />20:51 – Replay tools for mind-wandering<br />30:56 – Evolutionary cultural origins of self-awareness<br />45:02 – Animal metacognition<br />54:25 – Aging and self-awareness<br />58:32 – Is more always better?<br />1:00:41 – Political dogmatism and overconfidence<br />1:08:56 – Reliance on AI<br />1:15:15 – Building self-aware AI<br />1:23:20 – Future evolution of metacognition</p>



<p></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Steve and I discuss many topics from his new book Know Thyself: The Science of Self-Awareness. The book covers the full range of what we know about metacognition and self-awareness, including how brains might underlie metacognitive behavior, computational models to explain mechanisms of metacognition, how and why self-awareness evolved, which animals beyond humans harbor metacognition and how to test it, its role and potential origins in theory of mind and social interaction, how our metacognitive skills develop over our lifetimes, what our metacognitive skill tells us about our other psychological traits, and so on. We also discuss what it might look like when we are able to build metacognitive AI, and whether that’s even a good idea.







Steve’s lab: The MetaLab.Twitter: @smfleming.Steve and Hakwan Lau on episode 99 about consciousness. Papers:Metacognitive training: Domain-General Enhancements of Metacognitive Ability Through Adaptive TrainingThe book:Know Thyself: The Science of Self-Awareness.



Timestamps0:00 – Intro3:25 – Steve’s Career10:43 – Sub-personal vs. personal metacognition17:55 – Meditation and metacognition20:51 – Replay tools for mind-wandering30:56 – Evolutionary cultural origins of self-awareness45:02 – Animal metacognition54:25 – Aging and self-awareness58:32 – Is more always better?1:00:41 – Political dogmatism and overconfidence1:08:56 – Reliance on AI1:15:15 – Building self-aware AI1:23:20 – Future evolution of metacognition




]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 107 Steve Fleming: Know Thyself]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/06/art-fleming-01.jpg" alt="" class="wp-image-1238" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/02/headnewsteve_fleming.jpg" alt="" class="wp-image-1177" width="192" height="240" /></div>



<p>Steve and I discuss many topics from his new book <a href="https://www.amazon.com/gp/product/1541672844/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541672844&amp;linkId=087d7f9a45c4b8ac8166ec2d946236e3">Know Thyself: The Science of Self-Awareness</a>. The book covers the full range of what we know about metacognition and self-awareness, including how brains might underlie metacognitive behavior, computational models to explain mechanisms of metacognition, how and why self-awareness evolved, which animals beyond humans harbor metacognition and how to test it, its role and potential origins in theory of mind and social interaction, how our metacognitive skills develop over our lifetimes, what our metacognitive skill tells us about our other psychological traits, and so on. We also discuss what it might look like when we are able to build metacognitive AI, and whether that’s even a good idea.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/06/bookImage.jpg" alt="" class="wp-image-1239" width="162" height="250" /></div>



<ul><li>Steve’s lab: <a href="https://metacoglab.org/">The MetaLab</a>.</li><li>Twitter: <a href="https://twitter.com/smfleming">@smfleming</a>.</li><li><a href="https://braininspired.co/podcast/99/">Steve and Hakwan Lau on episode 99 about consciousness</a>. </li><li>Papers:<ul><li>Metacognitive training: <a href="https://static1.squarespace.com/static/5616b377e4b0670f148ff742/t/5d0f80760ff3ce00011762f1/1561297019002/CarpenterJEPG2019.pdf">Domain-General Enhancements of Metacognitive Ability Through Adaptive Training</a></li></ul></li><li>The book:<ul><li><a href="https://www.amazon.com/gp/product/1541672844/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541672844&amp;linkId=087d7f9a45c4b8ac8166ec2d946236e3">Know Thyself: The Science of Self-Awareness</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />3:25 – Steve’s Career<br />10:43 – Sub-personal vs. personal metacognition<br />17:55 – Meditation and metacognition<br />20:51 – Replay tools for mind-wandering<br />30:56 – Evolutionary cultural origins of self-awareness<br />45:02 – Animal metacognition<br />54:25 – Aging and self-awareness<br />58:32 – Is more always better?<br />1:00:41 – Political dogmatism and overconfidence<br />1:08:56 – Reliance on AI<br />1:15:15 – Building self-aware AI<br />1:23:20 – Future evolution of metacognition</p>



<p></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/107-Steve-Fleming-public.mp3" length="86123824"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Steve and I discuss many topics from his new book Know Thyself: The Science of Self-Awareness. The book covers the full range of what we know about metacognition and self-awareness, including how brains might underlie metacognitive behavior, computational models to explain mechanisms of metacognition, how and why self-awareness evolved, which animals beyond humans harbor metacognition and how to test it, its role and potential origins in theory of mind and social interaction, how our metacognitive skills develop over our lifetimes, what our metacognitive skill tells us about our other psychological traits, and so on. We also discuss what it might look like when we are able to build metacognitive AI, and whether that’s even a good idea.







Steve’s lab: The MetaLab.Twitter: @smfleming.Steve and Hakwan Lau on episode 99 about consciousness. Papers:Metacognitive training: Domain-General Enhancements of Metacognitive Ability Through Adaptive TrainingThe book:Know Thyself: The Science of Self-Awareness.



Timestamps0:00 – Intro3:25 – Steve’s Career10:43 – Sub-personal vs. personal metacognition17:55 – Meditation and metacognition20:51 – Replay tools for mind-wandering30:56 – Evolutionary cultural origins of self-awareness45:02 – Animal metacognition54:25 – Aging and self-awareness58:32 – Is more always better?1:00:41 – Political dogmatism and overconfidence1:08:56 – Reliance on AI1:15:15 – Building self-aware AI1:23:20 – Future evolution of metacognition




]]>
                </itunes:summary>
                                                                            <itunes:duration>01:29:24</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 106 Jacqueline Gottlieb and Robert Wilson: Deep Curiosity]]>
                </title>
                <pubDate>Thu, 27 May 2021 12:32:20 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-106-jacqueline-gottlieb-and-robert-wilson-deep-curiosity</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-106-jacqueline-gottlieb-and-robert-wilson-deep-curiosity</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/05/art-gottlieb-and-wilson-01.jpg" alt="" class="wp-image-1233" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Jackie and Bob discuss their research and thinking about curiosity. </p>



<p>Jackie’s background is studying decision making and attention, recording neurons in nonhuman primates during eye movement tasks, and she’s broadly interested in how we adapt our ongoing behavior. Curiosity is crucial for this, so she recently has focused on behavioral strategies to exercise curiosity, developing tasks that test exploration, information sampling, uncertainty reduction, and intrinsic motivation. </p>



<p>Bob’s background is developing computational models of reinforcement learning (including the exploration-exploitation tradeoff) and decision making, and he uses behavior and neuroimaging data in humans to test the models. He’s broadly interested in how and whether we can understand brains and cognition using mathematical models. Recently he’s been working on a model for curiosity known as deep exploration, which suggests we make decisions by deeply simulating a handful of scenarios and choosing based on the simulation outcomes.</p>



<p>We also discuss how one should go about their career (qua curiosity), how eye movements compare with other windows into cognition, and whether we can and should create curious AI agents (Bob is an emphatic yes, and Jackie is slightly worried that will be the time to worry about AI).</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="217" height="248" src="https://braininspired.co/wp-content/uploads/2021/05/picture-2710-1470182194-1.jpg" alt="" class="wp-image-1236" /></li><li class="blocks-gallery-item"><img width="422" height="630" src="https://braininspired.co/wp-content/uploads/2021/05/jgottlieb.jpg" alt="" class="wp-image-1234" /></li></ul>



<ul><li>Jackie’s lab: <a href="https://www.gottlieblab.com/">Jacqueline Gottlieb Laboratory at Columbia University</a>.</li><li>Bob’s lab: <a href="http://u.arizona.edu/~bob/index.html">Neuroscience of Reinforcement Learning and Decision Making</a>.</li><li>Twitter: Bob: <a href="https://twitter.com/NRDlab">@NRDLab</a> (Jackie’s not on twitter).</li><li>Related papers<ul><li><a href="https://9fabbb78-5b5b-4a1b-aeb0-b5443b6eac2f.filesusr.com/ugd/9cf124_51965780868c4847bdcc2648c03d7671.pdf">Curiosity, information demand and attentional priority</a>.</li><li><a href="https://psyarxiv.com/e9azw">Balancing exploration and exploitation with information and randomization</a>.</li><li><a href="https://psyarxiv.com/uj85c">Deep exploration as a unifying account of explore-exploit behavior</a>.</li></ul></li><li>Bob mentions an influential talk by Benjamin Van Roy:<ul><li><a href="http://videolectures.net/rldm2015_van_roy_function_randomization/">Generalization and Exploration via Value Function Randomization</a>.</li></ul></li><li>Bob mentions his paper with Anne Collins:<ul><li><a href="https://elifesciences.org/articles/49547https://elifesciences.org/articles/49547">Ten simple rules for the computational modeling of behavioral data</a>.</li></ul></li></ul>



<p>Timestamps:</p>



<p>0:00 – Intro<br />4:15 – Central scientific interests<br />8:32 – Advent of mathematical models<br />12:15 – Career exploration vs. exploitation<br />28:03 – Eye movements and active sensing<br />35:53 – Status of eye movements in neuroscience<br />44:16 – Why are we curious?<br />50:26 – Curiosity vs. Exploration vs. Intrinsic motivation<br />1:02:35 – Directed vs. random exploration<br />1:06:16 – Deep exploration<br />1:12:52 – How to know what to pay attention to<br />1:19:49 – Does AI need curiosity?<br />1:26:29 – What trait do you wish you had more of?</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Jackie and Bob discuss their research and thinking about curiosity. 



Jackie’s background is studying decision making and attention, recording neurons in nonhuman primates during eye movement tasks, and she’s broadly interested in how we adapt our ongoing behavior. Curiosity is crucial for this, so she recently has focused on behavioral strategies to exercise curiosity, developing tasks that test exploration, information sampling, uncertainty reduction, and intrinsic motivation. 



Bob’s background is developing computational models of reinforcement learning (including the exploration-exploitation tradeoff) and decision making, and he uses behavior and neuroimaging data in humans to test the models. He’s broadly interested in how and whether we can understand brains and cognition using mathematical models. Recently he’s been working on a model for curiosity known as deep exploration, which suggests we make decisions by deeply simulating a handful of scenarios and choosing based on the simulation outcomes.



We also discuss how one should go about their career (qua curiosity), how eye movements compare with other windows into cognition, and whether we can and should create curious AI agents (Bob is an emphatic yes, and Jackie is slightly worried that will be the time to worry about AI).







Jackie’s lab: Jacqueline Gottlieb Laboratory at Columbia University.Bob’s lab: Neuroscience of Reinforcement Learning and Decision Making.Twitter: Bob: @NRDLab (Jackie’s not on twitter).Related papersCuriosity, information demand and attentional priority.Balancing exploration and exploitation with information and randomization.Deep exploration as a unifying account of explore-exploit behavior.Bob mentions an influential talk by Benjamin Van Roy:Generalization and Exploration via Value Function Randomization.Bob mentions his paper with Anne Collins:Ten simple rules for the computational modeling of behavioral data.



Timestamps:



0:00 – Intro4:15 – Central scientific interests8:32 – Advent of mathematical models12:15 – Career exploration vs. exploitation28:03 – Eye movements and active sensing35:53 – Status of eye movements in neuroscience44:16 – Why are we curious?50:26 – Curiosity vs. Exploration vs. Intrinsic motivation1:02:35 – Directed vs. random exploration1:06:16 – Deep exploration1:12:52 – How to know what to pay attention to1:19:49 – Does AI need curiosity?1:26:29 – What trait do you wish you had more of?
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 106 Jacqueline Gottlieb and Robert Wilson: Deep Curiosity]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/05/art-gottlieb-and-wilson-01.jpg" alt="" class="wp-image-1233" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Jackie and Bob discuss their research and thinking about curiosity. </p>



<p>Jackie’s background is studying decision making and attention, recording neurons in nonhuman primates during eye movement tasks, and she’s broadly interested in how we adapt our ongoing behavior. Curiosity is crucial for this, so she recently has focused on behavioral strategies to exercise curiosity, developing tasks that test exploration, information sampling, uncertainty reduction, and intrinsic motivation. </p>



<p>Bob’s background is developing computational models of reinforcement learning (including the exploration-exploitation tradeoff) and decision making, and he uses behavior and neuroimaging data in humans to test the models. He’s broadly interested in how and whether we can understand brains and cognition using mathematical models. Recently he’s been working on a model for curiosity known as deep exploration, which suggests we make decisions by deeply simulating a handful of scenarios and choosing based on the simulation outcomes.</p>



<p>We also discuss how one should go about their career (qua curiosity), how eye movements compare with other windows into cognition, and whether we can and should create curious AI agents (Bob is an emphatic yes, and Jackie is slightly worried that will be the time to worry about AI).</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="217" height="248" src="https://braininspired.co/wp-content/uploads/2021/05/picture-2710-1470182194-1.jpg" alt="" class="wp-image-1236" /></li><li class="blocks-gallery-item"><img width="422" height="630" src="https://braininspired.co/wp-content/uploads/2021/05/jgottlieb.jpg" alt="" class="wp-image-1234" /></li></ul>



<ul><li>Jackie’s lab: <a href="https://www.gottlieblab.com/">Jacqueline Gottlieb Laboratory at Columbia University</a>.</li><li>Bob’s lab: <a href="http://u.arizona.edu/~bob/index.html">Neuroscience of Reinforcement Learning and Decision Making</a>.</li><li>Twitter: Bob: <a href="https://twitter.com/NRDlab">@NRDLab</a> (Jackie’s not on twitter).</li><li>Related papers<ul><li><a href="https://9fabbb78-5b5b-4a1b-aeb0-b5443b6eac2f.filesusr.com/ugd/9cf124_51965780868c4847bdcc2648c03d7671.pdf">Curiosity, information demand and attentional priority</a>.</li><li><a href="https://psyarxiv.com/e9azw">Balancing exploration and exploitation with information and randomization</a>.</li><li><a href="https://psyarxiv.com/uj85c">Deep exploration as a unifying account of explore-exploit behavior</a>.</li></ul></li><li>Bob mentions an influential talk by Benjamin Van Roy:<ul><li><a href="http://videolectures.net/rldm2015_van_roy_function_randomization/">Generalization and Exploration via Value Function Randomization</a>.</li></ul></li><li>Bob mentions his paper with Anne Collins:<ul><li><a href="https://elifesciences.org/articles/49547">Ten simple rules for the computational modeling of behavioral data</a>.</li></ul></li></ul>



<p>Timestamps:</p>



<p>0:00 – Intro<br />4:15 – Central scientific interests<br />8:32 – Advent of mathematical models<br />12:15 – Career exploration vs. exploitation<br />28:03 – Eye movements and active sensing<br />35:53 – Status of eye movements in neuroscience<br />44:16 – Why are we curious?<br />50:26 – Curiosity vs. Exploration vs. Intrinsic motivation<br />1:02:35 – Directed vs. random exploration<br />1:06:16 – Deep exploration<br />1:12:52 – How to know what to pay attention to<br />1:19:49 – Does AI need curiosity?<br />1:26:29 – What trait do you wish you had more of?</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/106-Jackie-Gottlieb-and-Bob-Wilson-public.mp3" length="88508517"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Jackie and Bob discuss their research and thinking about curiosity. 



Jackie’s background is studying decision making and attention, recording neurons in nonhuman primates during eye movement tasks, and she’s broadly interested in how we adapt our ongoing behavior. Curiosity is crucial for this, so she recently has focused on behavioral strategies to exercise curiosity, developing tasks that test exploration, information sampling, uncertainty reduction, and intrinsic motivation. 



Bob’s background is developing computational models of reinforcement learning (including the exploration-exploitation tradeoff) and decision making, and he uses behavior and neuroimaging data in humans to test the models. He’s broadly interested in how and whether we can understand brains and cognition using mathematical models. Recently he’s been working on a model for curiosity known as deep exploration, which suggests we make decisions by deeply simulating a handful of scenarios and choosing based on the simulation outcomes.



We also discuss how one should go about their career (qua curiosity), how eye movements compare with other windows into cognition, and whether we can and should create curious AI agents (Bob is an emphatic yes, and Jackie is slightly worried that will be the time to worry about AI).







Jackie’s lab: Jacqueline Gottlieb Laboratory at Columbia University.Bob’s lab: Neuroscience of Reinforcement Learning and Decision Making.Twitter: Bob: @NRDLab (Jackie’s not on twitter).Related papersCuriosity, information demand and attentional priority.Balancing exploration and exploitation with information and randomization.Deep exploration as a unifying account of explore-exploit behavior.Bob mentions an influential talk by Benjamin Van Roy:Generalization and Exploration via Value Function Randomization.Bob mentions his paper with Anne Collins:Ten simple rules for the computational modeling of behavioral data.



Timestamps:



0:00 – Intro4:15 – Central scientific interests8:32 – Advent of mathematical models12:15 – Career exploration vs. exploitation28:03 – Eye movements and active sensing35:53 – Status of eye movements in neuroscience44:16 – Why are we curious?50:26 – Curiosity vs. Exploration vs. Intrinsic motivation1:02:35 – Directed vs. random exploration1:06:16 – Deep exploration1:12:52 – How to know what to pay attention to1:19:49 – Does AI need curiosity?1:26:29 – What trait do you wish you had more of?
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:31:53</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 105 Sanjeev Arora: Off the Convex Path]]>
                </title>
                <pubDate>Mon, 17 May 2021 07:58:43 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-105-sanjeev-arora-off-the-convex-path</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-105-sanjeev-arora-off-the-convex-path</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/05/art-arora-01.jpg" alt="" class="wp-image-1231" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/05/sanjeevphotosmall.jpg" alt="" class="wp-image-1230" width="227" height="275" /></div>



<p>Sanjeev and I discuss some of the progress toward understanding how deep learning works, especially under previous assumptions it wouldn’t or shouldn’t work as well as it does. Deep learning theory poses a challenge for mathematics, because its methods aren’t rooted in mathematical theory and therefore are a “black box” for math to open. We discuss how Sanjeev thinks optimization, the common framework for thinking of how deep nets learn, is the wrong approach. Instead, a promising alternative focuses on the learning trajectories that occur as a result of different learning algorithms. We discuss two examples of his research to illustrate this: creating deep nets with infinitely large layers (and the networks still find solutions among the infinite possible solutions!), and massively increasing the learning rate during training (the opposite of accepted wisdom, and yet, again, the network finds solutions!). We also discuss his past focus on computational complexity and how he doesn’t share the current neuroscience optimism comparing brains to deep nets.</p>



<ul><li><a href="https://www.cs.princeton.edu/~arora/">Sanjeev’s website</a>.</li><li>His <a href="https://unsupervised.cs.princeton.edu/">Research group website</a>.</li><li>His blog: <a href="http://offconvex.github.io/">Off The Convex Path.</a></li><li>Papers we discuss<ul><li><a href="https://arxiv.org/abs/1904.11955">On Exact Computation with an Infinitely Wide Neural Net.</a></li><li><a href="https://arxiv.org/pdf/1910.07454.pdf">An Exponential Learning Rate Schedule for Deep Learning</a></li></ul></li><li>Related<ul><li>The episode with <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a> covers related deep learning theory in episode 52.</li><li><a href="https://braininspired.co/wp-admin/post.php?post=1165&amp;action=edit">Omri Barak</a> discusses the importance of learning trajectories to understand RNNs in episode 97.</li><li>Sanjeev mentions <a href="https://ai.columbia.edu/faculty/christos-papadimitriou">Christos Papadimitriou</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />7:32 – Computational complexity<br />12:25 – Algorithms<br />13:45 – Deep learning vs. traditional optimization<br />17:01 – Evolving view of deep learning<br />18:33 – Reproducibility crisis in AI?<br />21:12 – Surprising effectiveness of deep learning<br />27:50 – “Optimization” isn’t the right framework<br />30:08 – Infinitely wide nets<br />35:41 – Exponential learning rates<br />42:39 – Data as the next frontier<br />44:12 – Neuroscience and AI differences<br />47:13 – Focus on algorithms, architecture, and objective functions<br />55:50 – Advice for deep learning theorists<br />58:05 – Decoding minds</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Sanjeev and I discuss some of the progress toward understanding how deep learning works, especially under previous assumptions it wouldn’t or shouldn’t work as well as it does. Deep learning theory poses a challenge for mathematics, because its methods aren’t rooted in mathematical theory and therefore are a “black box” for math to open. We discuss how Sanjeev thinks optimization, the common framework for thinking of how deep nets learn, is the wrong approach. Instead, a promising alternative focuses on the learning trajectories that occur as a result of different learning algorithms. We discuss two examples of his research to illustrate this: creating deep nets with infinitely large layers (and the networks still find solutions among the infinite possible solutions!), and massively increasing the learning rate during training (the opposite of accepted wisdom, and yet, again, the network finds solutions!). We also discuss his past focus on computational complexity and how he doesn’t share the current neuroscience optimism comparing brains to deep nets.



Sanjeev’s website.His Research group website.His blog: Off The Convex Path.Papers we discussOn Exact Computation with an Infinitely Wide Neural Net.An Exponential Learning Rate Schedule for Deep LearningRelatedThe episode with Andrew Saxe covers related deep learning theory in episode 52.Omri Barak discusses the importance of learning trajectories to understand RNNs in episode 97.Sanjeev mentions Christos Papadimitriou.



Timestamps0:00 – Intro7:32 – Computational complexity12:25 – Algorithms13:45 – Deep learning vs. traditional optimization17:01 – Evolving view of deep learning18:33 – Reproducibility crisis in AI?21:12 – Surprising effectiveness of deep learning27:50 – “Optimization” isn’t the right framework30:08 – Infinitely wide nets35:41 – Exponential learning rates42:39 – Data as the next frontier44:12 – Neuroscience and AI differences47:13 – Focus on algorithms, architecture, and objective functions55:50 – Advice for deep learning theorists58:05 – Decoding minds
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 105 Sanjeev Arora: Off the Convex Path]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/05/art-arora-01.jpg" alt="" class="wp-image-1231" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/05/sanjeevphotosmall.jpg" alt="" class="wp-image-1230" width="227" height="275" /></div>



<p>Sanjeev and I discuss some of the progress toward understanding how deep learning works, especially under previous assumptions it wouldn’t or shouldn’t work as well as it does. Deep learning theory poses a challenge for mathematics, because its methods aren’t rooted in mathematical theory and therefore are a “black box” for math to open. We discuss how Sanjeev thinks optimization, the common framework for thinking of how deep nets learn, is the wrong approach. Instead, a promising alternative focuses on the learning trajectories that occur as a result of different learning algorithms. We discuss two examples of his research to illustrate this: creating deep nets with infinitely large layers (and the networks still find solutions among the infinite possible solutions!), and massively increasing the learning rate during training (the opposite of accepted wisdom, and yet, again, the network finds solutions!). We also discuss his past focus on computational complexity and how he doesn’t share the current neuroscience optimism comparing brains to deep nets.</p>



<ul><li><a href="https://www.cs.princeton.edu/~arora/">Sanjeev’s website</a>.</li><li>His <a href="https://unsupervised.cs.princeton.edu/">Research group website</a>.</li><li>His blog: <a href="http://offconvex.github.io/">Off The Convex Path.</a></li><li>Papers we discuss<ul><li><a href="https://arxiv.org/abs/1904.11955">On Exact Computation with an Infinitely Wide Neural Net.</a></li><li><a href="https://arxiv.org/pdf/1910.07454.pdf">An Exponential Learning Rate Schedule for Deep Learning</a></li></ul></li><li>Related<ul><li>The episode with <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a> covers related deep learning theory in episode 52.</li><li><a href="https://braininspired.co/wp-admin/post.php?post=1165&amp;action=edit">Omri Barak</a> discusses the importance of learning trajectories to understand RNNs in episode 97.</li><li>Sanjeev mentions <a href="https://ai.columbia.edu/faculty/christos-papadimitriou">Christos Papadimitriou</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />7:32 – Computational complexity<br />12:25 – Algorithms<br />13:45 – Deep learning vs. traditional optimization<br />17:01 – Evolving view of deep learning<br />18:33 – Reproducibility crisis in AI?<br />21:12 – Surprising effectiveness of deep learning<br />27:50 – “Optimization” isn’t the right framework<br />30:08 – Infinitely wide nets<br />35:41 – Exponential learning rates<br />42:39 – Data as the next frontier<br />44:12 – Neuroscience and AI differences<br />47:13 – Focus on algorithms, architecture, and objective functions<br />55:50 – Advice for deep learning theorists<br />58:05 – Decoding minds</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/105-Sanjeev-Arora-public.mp3" length="59548189"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Sanjeev and I discuss some of the progress toward understanding how deep learning works, especially under previous assumptions it wouldn’t or shouldn’t work as well as it does. Deep learning theory poses a challenge for mathematics, because its methods aren’t rooted in mathematical theory and therefore are a “black box” for math to open. We discuss how Sanjeev thinks optimization, the common framework for thinking of how deep nets learn, is the wrong approach. Instead, a promising alternative focuses on the learning trajectories that occur as a result of different learning algorithms. We discuss two examples of his research to illustrate this: creating deep nets with infinitely large layers (and the networks still find solutions among the infinite possible solutions!), and massively increasing the learning rate during training (the opposite of accepted wisdom, and yet, again, the network finds solutions!). We also discuss his past focus on computational complexity and how he doesn’t share the current neuroscience optimism comparing brains to deep nets.



Sanjeev’s website.His Research group website.His blog: Off The Convex Path.Papers we discussOn Exact Computation with an Infinitely Wide Neural Net.An Exponential Learning Rate Schedule for Deep LearningRelatedThe episode with Andrew Saxe covers related deep learning theory in episode 52.Omri Barak discusses the importance of learning trajectories to understand RNNs in episode 97.Sanjeev mentions Christos Papadimitriou.



Timestamps0:00 – Intro7:32 – Computational complexity12:25 – Algorithms13:45 – Deep learning vs. traditional optimization17:01 – Evolving view of deep learning18:33 – Reproducibility crisis in AI?21:12 – Surprising effectiveness of deep learning27:50 – “Optimization” isn’t the right framework30:08 – Infinitely wide nets35:41 – Exponential learning rates42:39 – Data as the next frontier44:12 – Neuroscience and AI differences47:13 – Focus on algorithms, architecture, and objective functions55:50 – Advice for deep learning theorists58:05 – Decoding minds
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:01:43</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 104 John Kounios and David Rosen: Creativity, Expertise, Insight]]>
                </title>
                <pubDate>Fri, 07 May 2021 10:34:07 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-104-john-kounios-and-david-rosen-creativity-expertise-insight</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-104-john-kounios-and-david-rosen-creativity-expertise-insight</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/05/art-rosen-kounios-01.jpg" alt="" class="wp-image-1223" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="1000" height="1000" src="https://braininspired.co/wp-content/uploads/2021/05/davidhead-1.png" alt="" class="wp-image-1226" /></li><li class="blocks-gallery-item"><img width="367" height="367" src="https://braininspired.co/wp-content/uploads/2021/05/johnhead-1.jpg" alt="" class="wp-image-1228" /></li></ul>



<p>What is creativity? How do we measure it? How do our brains implement it, and how might AI? Those are some of the questions John, David, and I discuss. The neuroscience of creativity is young, in its “wild west” days still. We talk about a few creativity studies they’ve performed that distinguish different creative processes with respect to different levels of expertise (in this case, in jazz improvisation), and the underlying brain circuits and activity, including using transcranial direct current stimulation to alter the creative process. Related to creativity, we also discuss the phenomenon and neuroscience of insight (the topic of John’s book, <a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32">The Eureka Factor</a>), unconscious automatic type 1 processes versus conscious deliberate type 2 processes, states of flow, creative process versus creative products, and a lot more.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/05/eurkabook.jpg" alt="" class="wp-image-1224" width="167" height="250" /></a></div>



<ul><li><a href="https://sites.google.com/site/johnkounios/">John Kounios</a>.</li><li><a href="https://www.secretchordlaboratories.com/">Secret Chord Laboratories</a> (David’s company).</li><li>Twitter: <a href="https://twitter.com/JohnKounios">@JohnKounios</a>; <a href="https://twitter.com/NeuroBassDave" target="_blank" rel="noreferrer noopener">@NeuroBassDave</a>.</li><li>John’s book (with Mark Beeman) on insight and creativity.<ul><li><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32">The Eureka Factor: Aha Moments, </a><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32" target="_blank" rel="noreferrer noopener">Creative</a><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32"> Insight, and the Brain</a>.</li></ul></li><li>The papers we discuss or mention:<ul><li><a href="https://www.researchgate.net/publication/315908560_All_You_Need_to_Do_Is_Ask_The_Exhortation_to_Be_Creative_Improves_Creative_Performance_More_for_Nonexpert_Than_Expert_Jazz_Musicians">All You Need to Do Is Ask? The Exhortation to Be Creative Improves Creative Performance More for Nonexpert Than Expert Jazz Musicians</a></li><li><a href="https://www.frontiersin.org/articles/10.3389/fnhum.2016.00579/full">Anodal tDCS to Right Dorsolateral Prefrontal Cortex Facilitates Performance for...</a></li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












What is creativity? How do we measure it? How do our brains implement it, and how might AI? Those are some of the questions John, David, and I discuss. The neuroscience of creativity is young, in its “wild west” days still. We talk about a few creativity studies they’ve performed that distinguish different creative processes with respect to different levels of expertise (in this case, in jazz improvisation), and the underlying brain circuits and activity, including using transcranial direct current stimulation to alter the creative process. Related to creativity, we also discuss the phenomenon and neuroscience of insight (the topic of John’s book, The Eureka Factor), unconscious automatic type 1 processes versus conscious deliberate type 2 processes, states of flow, creative process versus creative products, and a lot more.







John Kounios.Secret Chord Laboratories (David’s company).Twitter: @JohnKounios; @NeuroBassDave.John’s book (with Mark Beeman) on insight and creativity.The Eureka Factor: Aha Moments, Creative Insight, and the Brain.The papers we discuss or mention:All You Need to Do Is Ask? The Exhortation to Be Creative Improves Creative Performance More for Nonexpert Than Expert Jazz MusiciansAnodal tDCS to Right Dorsolateral Prefrontal Cortex Facilitates Performance for...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 104 John Kounios and David Rosen: Creativity, Expertise, Insight]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/05/art-rosen-kounios-01.jpg" alt="" class="wp-image-1223" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="1000" height="1000" src="https://braininspired.co/wp-content/uploads/2021/05/davidhead-1.png" alt="" class="wp-image-1226" /></li><li class="blocks-gallery-item"><img width="367" height="367" src="https://braininspired.co/wp-content/uploads/2021/05/johnhead-1.jpg" alt="" class="wp-image-1228" /></li></ul>



<p>What is creativity? How do we measure it? How do our brains implement it, and how might AI? Those are some of the questions John, David, and I discuss. The neuroscience of creativity is young, in its “wild west” days still. We talk about a few creativity studies they’ve performed that distinguish different creative processes with respect to different levels of expertise (in this case, in jazz improvisation), and the underlying brain circuits and activity, including using transcranial direct current stimulation to alter the creative process. Related to creativity, we also discuss the phenomenon and neuroscience of insight (the topic of John’s book, <a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32">The Eureka Factor</a>), unconscious automatic type 1 processes versus conscious deliberate type 2 processes, states of flow, creative process versus creative products, and a lot more.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/05/eurkabook.jpg" alt="" class="wp-image-1224" width="167" height="250" /></a></div>



<ul><li><a href="https://sites.google.com/site/johnkounios/">John Kounios</a>.</li><li><a href="https://www.secretchordlaboratories.com/">Secret Chord Laboratories</a> (David’s company).</li><li>Twitter: <a href="https://twitter.com/JohnKounios">@JohnKounios</a>; <a href="https://twitter.com/NeuroBassDave" target="_blank" rel="noreferrer noopener">@NeuroBassDave</a>.</li><li>John’s book (with Mark Beeman) on insight and creativity.<ul><li><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32">The Eureka Factor: Aha Moments, </a><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32" target="_blank" rel="noreferrer noopener">Creative</a><a href="https://www.amazon.com/gp/product/1079002251/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1079002251&amp;linkId=3c8899852e62756cee3d2d0bc9285a32"> Insight, and the Brain</a>.</li></ul></li><li>The papers we discuss or mention:<ul><li><a href="https://www.researchgate.net/publication/315908560_All_You_Need_to_Do_Is_Ask_The_Exhortation_to_Be_Creative_Improves_Creative_Performance_More_for_Nonexpert_Than_Expert_Jazz_Musicians">All You Need to Do Is Ask? 
The Exhortation to Be Creative Improves Creative Performance More for Nonexpert Than Expert Jazz Musicians</a></li><li><a href="https://www.frontiersin.org/articles/10.3389/fnhum.2016.00579/full">Anodal tDCS to Right Dorsolateral Prefrontal Cortex Facilitates Performance for Novice Jazz Improvisers but Hinders Experts</a></li><li><a href="https://www.sciencedirect.com/science/article/pii/S1053811920301191">Dual-process contributions to creativity in jazz improvisations: An SPM-EEG study</a>.</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />16:20 – Where are we broadly in science of creativity?<br />18:23 – Origins of creativity research<br />22:14 – Divergent and convergent thought<br />26:31 – Secret Chord Labs<br />32:40 – Familiar surprise<br />38:55 – The Eureka Factor<br />42:27 – Dual process model<br />52:54 – Creativity and jazz expertise<br />55:53 – “Be creative” behavioral study<br />59:17 – Stimulating the creative brain<br />1:02:04 – Brain circuits underlying creativity<br />1:14:36 – What does this tell us about creativity?<br />1:16:48 – Intelligence vs. creativity<br />1:18:25 – Switching between creative modes<br />1:25:57 – Flow states and insight<br />1:34:29 – Creativity and insight in AI<br />1:43:26 – Creative products vs. process</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/104-John-Kounios-and-David-Rosen-public.mp3" length="106423489"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












What is creativity? How do we measure it? How do our brains implement it, and how might AI? Those are some of the questions John, David, and I discuss. The neuroscience of creativity is young, in its “wild west” days still. We talk about a few creativity studies they’ve performed that distinguish different creative processes with respect to different levels of expertise (in this case, in jazz improvisation), and the underlying brain circuits and activity, including using transcranial direct current stimulation to alter the creative process. Related to creativity, we also discuss the phenomenon and neuroscience of insight (the topic of John’s book, The Eureka Factor), unconscious automatic type 1 processes versus conscious deliberate type 2 processes, states of flow, creative process versus creative products, and a lot more.







John Kounios.Secret Chord Laboratories (David’s company).Twitter: @JohnKounios; @NeuroBassDave.John’s book (with Mark Beeman) on insight and creativity.The Eureka Factor: Aha Moments, Creative Insight, and the Brain.The papers we discuss or mention:All You Need to Do Is Ask? The Exhortation to Be Creative Improves Creative Performance More for Nonexpert Than Expert Jazz MusiciansAnodal tDCS to Right Dorsolateral Prefrontal Cortex Facilitates Performance for...]]>
                </itunes:summary>
                                                                            <itunes:duration>01:50:32</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 103 Randal Koene and Ken Hayworth: The Road to Mind Uploading]]>
                </title>
                <pubDate>Mon, 26 Apr 2021 08:19:54 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-103-randal-koene-and-ken-hayworth-the-road-to-mind-uploading</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-103-randal-koene-and-ken-hayworth-the-road-to-mind-uploading</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/04/art-koene-hayworth-01.jpg" alt="" class="wp-image-1216" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="300" height="300" src="https://braininspired.co/wp-content/uploads/2021/04/kenHead2-2.jpg" alt="" class="wp-image-1220" /></li><li class="blocks-gallery-item"><img width="300" height="300" src="https://braininspired.co/wp-content/uploads/2021/04/randalHead-1.jpg" alt="" class="wp-image-1221" /></li></ul>



<p>Randal, Ken, and I discuss a host of topics around the future goal of uploading our minds into non-brain systems, to continue our mental lives and expand our range of experiences. The basic requirement for such a substrate-independent mind is to implement whole brain emulation. We discuss two basic approaches to whole brain emulation. The “scan and copy” approach proposes we somehow scan the entire structure of our brains (at whatever scale is necessary) and store that scan until some future date when we have figured out how to use that information to build a substrate that can house your mind. The “gradual replacement” approach proposes we slowly replace parts of the brain with functioning alternative machines, eventually replacing the entire brain with non-biological material and yet retaining a functioning mind.</p>



<p><br />Randal and Ken are neuroscientists who understand the magnitude and challenges of a massive project like mind uploading, who also understand what we can do right now, with current technology, to advance toward that lofty goal, and who are thoughtful about what steps we need to take to enable further advancements.</p>



<ul><li>Randal A Koene<ul><li>Twitter: <a href="https://twitter.com/randalkoene">@</a><a href="https://twitter.com/randalkoene" target="_blank" rel="noreferrer noopener">randalkoene</a></li><li><a href="https://carboncopies.org/">Carboncopies Foundation</a>.</li><li><a href="https://www.randalkoene.com/home" target="_blank" rel="noreferrer noopener">Randal’s website</a>.</li></ul></li><li>Ken Hayworth<ul><li>Twitter: <a href="https://twitter.com/KennethHayworth">@KennethHayworth</a></li><li><a href="https://www.brainpreservation.org/">Brain Preservation Foundation</a>.<ul><li><a href="https://www.youtube.com/user/brainpreservation">Youtube videos</a>.</li></ul></li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />6:14 – What Ken wants<br />11:22 – What Randal wants<br />22:29 – Brain preservation<br />27:18 – Aldehyde stabilized cryopreservation<br />31:51 – Scan and copy vs. gradual replacement<br />38:25 – Building a roadmap<br />49:45 – Limits of current experimental paradigms<br />53:51 – Our evolved brains<br />1:06:58 – Counterarguments<br />1:10:31 – Animal models for whole brain emulation<br />1:15:01 – Understanding vs. emulating brains<br />1:22:37 – Current challenges</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Randal, Ken, and I discuss a host of topics around the future goal of uploading our minds into non-brain systems, to continue our mental lives and expand our range of experiences. The basic requirement for such a substrate-independent mind is to implement whole brain emulation. We discuss two basic approaches to whole brain emulation. The “scan and copy” approach proposes we somehow scan the entire structure of our brains (at whatever scale is necessary) and store that scan until some future date when we have figured out how to use that information to build a substrate that can house your mind. The “gradual replacement” approach proposes we slowly replace parts of the brain with functioning alternative machines, eventually replacing the entire brain with non-biological material and yet retaining a functioning mind.



Randal and Ken are neuroscientists who understand the magnitude and challenges of a massive project like mind uploading, who also understand what we can do right now, with current technology, to advance toward that lofty goal, and who are thoughtful about what steps we need to take to enable further advancements.



Randal A KoeneTwitter: @randalkoeneCarboncopies Foundation.Randal’s website.Ken HayworthTwitter: @KennethHayworthBrain Preservation Foundation.Youtube videos.



Timestamps0:00 – Intro6:14 – What Ken wants11:22 – What Randal wants22:29 – Brain preservation27:18 – Aldehyde stabilized cryopreservation31:51 – Scan and copy vs. gradual replacement38:25 – Building a roadmap49:45 – Limits of current experimental paradigms53:51 – Our evolved brains1:06:58 – Counterarguments1:10:31 – Animal models for whole brain emulation1:15:01 – Understanding vs. emulating brains1:22:37 – Current challenges
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 103 Randal Koene and Ken Hayworth: The Road to Mind Uploading]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/04/art-koene-hayworth-01.jpg" alt="" class="wp-image-1216" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="300" height="300" src="https://braininspired.co/wp-content/uploads/2021/04/kenHead2-2.jpg" alt="" class="wp-image-1220" /></li><li class="blocks-gallery-item"><img width="300" height="300" src="https://braininspired.co/wp-content/uploads/2021/04/randalHead-1.jpg" alt="" class="wp-image-1221" /></li></ul>



<p>Randal, Ken, and I discuss a host of topics around the future goal of uploading our minds into non-brain systems, to continue our mental lives and expand our range of experiences. The basic requirement for such a substrate-independent mind is to implement whole brain emulation. We discuss two basic approaches to whole brain emulation. The “scan and copy” approach proposes we somehow scan the entire structure of our brains (at whatever scale is necessary) and store that scan until some future date when we have figured out how to use that information to build a substrate that can house your mind. The “gradual replacement” approach proposes we slowly replace parts of the brain with functioning alternative machines, eventually replacing the entire brain with non-biological material and yet retaining a functioning mind.</p>



<p><br />Randal and Ken are neuroscientists who understand the magnitude and challenges of a massive project like mind uploading, who also understand what we can do right now, with current technology, to advance toward that lofty goal, and who are thoughtful about what steps we need to take to enable further advancements.</p>



<ul><li>Randal A Koene<ul><li>Twitter: <a href="https://twitter.com/randalkoene">@</a><a href="https://twitter.com/randalkoene" target="_blank" rel="noreferrer noopener">randalkoene</a></li><li><a href="https://carboncopies.org/">Carboncopies Foundation</a>.</li><li><a href="https://www.randalkoene.com/home" target="_blank" rel="noreferrer noopener">Randal’s website</a>.</li></ul></li><li>Ken Hayworth<ul><li>Twitter: <a href="https://twitter.com/KennethHayworth">@KennethHayworth</a></li><li><a href="https://www.brainpreservation.org/">Brain Preservation Foundation</a>.<ul><li><a href="https://www.youtube.com/user/brainpreservation">Youtube videos</a>.</li></ul></li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />6:14 – What Ken wants<br />11:22 – What Randal wants<br />22:29 – Brain preservation<br />27:18 – Aldehyde stabilized cryopreservation<br />31:51 – Scan and copy vs. gradual replacement<br />38:25 – Building a roadmap<br />49:45 – Limits of current experimental paradigms<br />53:51 – Our evolved brains<br />1:06:58 – Counterarguments<br />1:10:31 – Animal models for whole brain emulation<br />1:15:01 – Understanding vs. emulating brains<br />1:22:37 – Current challenges</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/103-Randal-Koene-and-Ken-Hayworth-public.mp3" length="84235073"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Randal, Ken, and I discuss a host of topics around the future goal of uploading our minds into non-brain systems, to continue our mental lives and expand our range of experiences. The basic requirement for such a substrate-independent mind is to implement whole brain emulation. We discuss two basic approaches to whole brain emulation. The “scan and copy” approach proposes we somehow scan the entire structure of our brains (at whatever scale is necessary) and store that scan until some future date when we have figured out how to use that information to build a substrate that can house your mind. The “gradual replacement” approach proposes we slowly replace parts of the brain with functioning alternative machines, eventually replacing the entire brain with non-biological material and yet retaining a functioning mind.



Randal and Ken are neuroscientists who understand the magnitude and challenges of a massive project like mind uploading, who also understand what we can do right now, with current technology, to advance toward that lofty goal, and who are thoughtful about what steps we need to take to enable further advancements.



Randal A KoeneTwitter: @randalkoeneCarboncopies Foundation.Randal’s website.Ken HayworthTwitter: @KennethHayworthBrain Preservation Foundation.Youtube videos.



Timestamps0:00 – Intro6:14 – What Ken wants11:22 – What Randal wants22:29 – Brain preservation27:18 – Aldehyde stabilized cryopreservation31:51 – Scan and copy vs. gradual replacement38:25 – Building a roadmap49:45 – Limits of current experimental paradigms53:51 – Our evolved brains1:06:58 – Counterarguments1:10:31 – Animal models for whole brain emulation1:15:01 – Understanding vs. emulating brains1:22:37 – Current challenges
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:27:26</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 102 Mark Humphries: What Is It Like To Be A Spike?]]>
                </title>
                <pubDate>Fri, 16 Apr 2021 08:58:34 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-102-mark-humphries-what-is-it-like-to-be-a-spike</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-102-mark-humphries-what-is-it-like-to-be-a-spike</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/04/art-humphries-01.jpg" alt="" class="wp-image-1211" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0691195889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691195889&amp;linkId=862612dc7cf84f4586028b1eb2b9ef11" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/04/spikeimage.jpg" alt="" class="wp-image-1210" width="165" height="250" /></a></div>



<p>Mark and I discuss his book, <a href="https://www.amazon.com/gp/product/0691195889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691195889&amp;linkId=862612dc7cf84f4586028b1eb2b9ef11" target="_blank" rel="noreferrer noopener">The Spike: An Epic Journey Through the Brain in 2.1 Seconds</a>. It chronicles how a series of action potentials fire through the brain in a couple seconds of someone’s life. Starting with light hitting the retina as a person looks at a cookie, Mark describes how that light gets translated into spikes,  how those spikes get processed in our visual system and eventually transform into motor commands to grab that cookie. Along the way, he describes some of the big ideas throughout the history of studying brains (like the mechanisms to explain how neurons seem to fire so randomly), the big mysteries we currently face (like why do so many neurons do so little?), and some of the main theories to explain those mysteries (we’re prediction machines!). A fun read and discussion. This is Mark’s second time on the podcast – <a href="https://braininspired.co/podcast/bi-004-mark-humphries-learning-to-remember/">he was on episode 4</a> in the early days, talking more in depth about some of the work we discuss in this episode!</p>



<div class="wp-block-image"><img width="202" height="250" src="https://braininspired.co/wp-content/uploads/2018/07/mark-humphries.jpeg" alt="" class="wp-image-141" /></div>



<ul><li><a href="https://www.humphries-lab.org/">The Humphries Lab</a>.</li><li>Twitter: <a href="https://twitter.com/markdhumphries">@markdhumphries</a></li><li>Book: <a href="https://www.amazon.com/gp/product/0691195889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691195889&amp;linkId=862612dc7cf84f4586028b1eb2b9ef11" target="_blank" rel="noreferrer noopener">The Spike: An Epic Journey Through the Brain in 2.1 Seconds</a>.</li><li>Related papers<ul><li><a href="https://elifesciences.org/articles/27342">A spiral attractor network drives rhythmic locomotion</a>.</li></ul></li></ul>



<p>Timestamps:</p>



<p>0:00 – Intro<br />3:25 – Writing a book<br />15:37 – Mark’s main interest<br />19:41 – Future explanation of brain/mind<br />27:00 – Stochasticity and excitation/inhibition balance<br />36:56 – Dendritic computation for network dynamics<br />39:10 – Do details matter for AI?<br />44:06 – Spike failure<br />51:12 – Dark neurons<br />1:07:57 – Intrinsic spontaneous activity<br />1:16:16 – Best scientific moment<br />1:23:58 – Failure<br />1:28:45 – Advice</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Mark and I discuss his book, The Spike: An Epic Journey Through the Brain in 2.1 Seconds. It chronicles how a series of action potentials fire through the brain in a couple seconds of someone’s life. Starting with light hitting the retina as a person looks at a cookie, Mark describes how that light gets translated into spikes,  how those spikes get processed in our visual system and eventually transform into motor commands to grab that cookie. Along the way, he describes some of the big ideas throughout the history of studying brains (like the mechanisms to explain how neurons seem to fire so randomly), the big mysteries we currently face (like why do so many neurons do so little?), and some of the main theories to explain those mysteries (we’re prediction machines!). A fun read and discussion. This is Mark’s second time on the podcast – he was on episode 4 in the early days, talking more in depth about some of the work we discuss in this episode!







The Humphries Lab.Twitter: @markdhumphriesBook: The Spike: An Epic Journey Through the Brain in 2.1 Seconds.Related papersA spiral attractor network drives rhythmic locomotion.



Timestamps:



0:00 – Intro3:25 – Writing a book15:37 – Mark’s main interest19:41 – Future explanation of brain/mind27:00 – Stochasticity and excitation/inhibition balance36:56 – Dendritic computation for network dynamics39:10 – Do details matter for AI?44:06 – Spike failure51:12 – Dark neurons1:07:57 – Intrinsic spontaneous activity1:16:16 – Best scientific moment1:23:58 – Failure1:28:45 – Advice
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 102 Mark Humphries: What Is It Like To Be A Spike?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/04/art-humphries-01.jpg" alt="" class="wp-image-1211" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0691195889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691195889&amp;linkId=862612dc7cf84f4586028b1eb2b9ef11" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/04/spikeimage.jpg" alt="" class="wp-image-1210" width="165" height="250" /></a></div>



<p>Mark and I discuss his book, <a href="https://www.amazon.com/gp/product/0691195889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691195889&amp;linkId=862612dc7cf84f4586028b1eb2b9ef11" target="_blank" rel="noreferrer noopener">The Spike: An Epic Journey Through the Brain in 2.1 Seconds</a>. It chronicles how a series of action potentials fire through the brain in a couple seconds of someone’s life. Starting with light hitting the retina as a person looks at a cookie, Mark describes how that light gets translated into spikes,  how those spikes get processed in our visual system and eventually transform into motor commands to grab that cookie. Along the way, he describes some of the big ideas throughout the history of studying brains (like the mechanisms to explain how neurons seem to fire so randomly), the big mysteries we currently face (like why do so many neurons do so little?), and some of the main theories to explain those mysteries (we’re prediction machines!). A fun read and discussion. This is Mark’s second time on the podcast – <a href="https://braininspired.co/podcast/bi-004-mark-humphries-learning-to-remember/">he was on episode 4</a> in the early days, talking more in depth about some of the work we discuss in this episode!</p>



<div class="wp-block-image"><img width="202" height="250" src="https://braininspired.co/wp-content/uploads/2018/07/mark-humphries.jpeg" alt="" class="wp-image-141" /></div>



<ul><li><a href="https://www.humphries-lab.org/">The Humphries Lab</a>.</li><li>Twitter: <a href="https://twitter.com/markdhumphries">@markdhumphries</a></li><li>Book: <a href="https://www.amazon.com/gp/product/0691195889/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691195889&amp;linkId=862612dc7cf84f4586028b1eb2b9ef11" target="_blank" rel="noreferrer noopener">The Spike: An Epic Journey Through the Brain in 2.1 Seconds</a>.</li><li>Related papers<ul><li><a href="https://elifesciences.org/articles/27342">A spiral attractor network drives rhythmic locomotion</a>.</li></ul></li></ul>



<p>Timestamps:</p>



<p>0:00 – Intro<br />3:25 – Writing a book<br />15:37 – Mark’s main interest<br />19:41 – Future explanation of brain/mind<br />27:00 – Stochasticity and excitation/inhibition balance<br />36:56 – Dendritic computation for network dynamics<br />39:10 – Do details matter for AI?<br />44:06 – Spike failure<br />51:12 – Dark neurons<br />1:07:57 – Intrinsic spontaneous activity<br />1:16:16 – Best scientific moment<br />1:23:58 – Failure<br />1:28:45 – Advice</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/102-Mark-Humphries.mp3" length="88947053"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Mark and I discuss his book, The Spike: An Epic Journey Through the Brain in 2.1 Seconds. It chronicles how a series of action potentials fire through the brain in a couple seconds of someone’s life. Starting with light hitting the retina as a person looks at a cookie, Mark describes how that light gets translated into spikes,  how those spikes get processed in our visual system and eventually transform into motor commands to grab that cookie. Along the way, he describes some of the big ideas throughout the history of studying brains (like the mechanisms to explain how neurons seem to fire so randomly), the big mysteries we currently face (like why do so many neurons do so little?), and some of the main theories to explain those mysteries (we’re prediction machines!). A fun read and discussion. This is Mark’s second time on the podcast – he was on episode 4 in the early days, talking more in depth about some of the work we discuss in this episode!







The Humphries Lab.Twitter: @markdhumphriesBook: The Spike: An Epic Journey Through the Brain in 2.1 Seconds.Related papersA spiral attractor network drives rhythmic locomotion.



Timestamps:



0:00 – Intro3:25 – Writing a book15:37 – Mark’s main interest19:41 – Future explanation of brain/mind27:00 – Stochasticity and excitation/inhibition balance36:56 – Dendritic computation for network dynamics39:10 – Do details matter for AI?44:06 – Spike failure51:12 – Dark neurons1:07:57 – Intrinsic spontaneous activity1:16:16 – Best scientific moment1:23:58 – Failure1:28:45 – Advice
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:32:20</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 101 Steve Potter: Motivating Brains In and Out of Dishes]]>
                </title>
                <pubDate>Tue, 06 Apr 2021 15:51:36 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-101-steve-potter-motivating-brains-in-and-out-of-dishes</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-101-steve-potter-motivating-brains-in-and-out-of-dishes</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/04/art-potter-01.jpg" alt="" class="wp-image-1207" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="203" height="203" src="https://braininspired.co/wp-content/uploads/2018/07/steve_potter.jpeg" alt="" class="wp-image-137" /></div>



<p>Steve and I discuss his book, <a href="https://www.amazon.com/gp/product/1838172807/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1838172807&amp;linkId=7f2ad551e783d3a00686770ee61da142">How to Motivate Your Students to Love Learning</a>, which is both a memoir and a guide for teachers and students to optimize the learning experience for intrinsic motivation. Steve taught neuroscience and engineering courses while running his own lab studying the activity of live cultured neural populations (which we discuss at length <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">in his previous episode</a>). He relentlessly tested and tweaked his teaching methods, including constant feedback from the students, to optimize their learning experiences. He settled on real-world, project-based learning approaches, like writing wikipedia articles and helping groups of students design and carry out their own experiments. We discuss that, plus the science behind learning, principles important for motivating students and maintaining that motivation, and many of the other valuable insights he shares in the book.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/04/bookcover.jpg" alt="" class="wp-image-1208" width="167" height="250" /></div>



<p><br />The first half of the episode we discuss diverse neuroscience and AI topics, like brain organoids, mind-uploading, synaptic plasticity, and more. Then we discuss many of the stories and lessons from his book, which I recommend for teachers, mentors, and life-long students who want to ensure they’re optimizing their own  learning.</p>



<ul><li><a href="http://potterlab.gatech.edu/">Potter Lab</a>.</li><li>Twitter: <a href="https://twitter.com/stevempotter">@stevempotter</a>.</li><li>The Book: <a href="https://www.amazon.com/gp/product/1838172807/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1838172807&amp;linkId=7f2ad551e783d3a00686770ee61da142">How to Motivate Your Students to Love Learning.</a></li><li><a href="https://drive.google.com/file/d/1AN1XASJkfNDTbVvIeP8hbO-AnWPEwtKN/view?usp=sharing">The glial cell activity movie</a>.</li></ul>



<p>0:00 – Intro<br />6:38 – Brain organoids<br />18:48 – Glial cell plasticity<br />24:50 – Whole brain emulation<br />35:28 – Industry vs. academia<br />45:32 – Intro to book: How To Motivate Your Students To Love Learning<br />48:29 – Steve’s childhood influences<br />57:21 – Developing one’s own intrinsic motivation<br />1:02:30 – Real-world assignments<br />1:08:00 – Keys to motivation<br />1:11:50 – Peer pressure<br />1:21:16 – Autonomy<br />1:25:38 – Wikipedia real-world assignment<br />1:33:12 – Relation to running a lab</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Steve and I discuss his book, How to Motivate Your Students to Love Learning, which is both a memoir and a guide for teachers and students to optimize the learning experience for intrinsic motivation. Steve taught neuroscience and engineering courses while running his own lab studying the activity of live cultured neural populations (which we discuss at length in his previous episode). He relentlessly tested and tweaked his teaching methods, including constant feedback from the students, to optimize their learning experiences. He settled on real-world, project-based learning approaches, like writing wikipedia articles and helping groups of students design and carry out their own experiments. We discuss that, plus the science behind learning, principles important for motivating students and maintaining that motivation, and many of the other valuable insights he shares in the book.







The first half of the episode we discuss diverse neuroscience and AI topics, like brain organoids, mind-uploading, synaptic plasticity, and more. Then we discuss many of the stories and lessons from his book, which I recommend for teachers, mentors, and life-long students who want to ensure they’re optimizing their own  learning.



Potter Lab.Twitter: @stevempotter.The Book: How to Motivate Your Students to Love Learning.The glial cell activity movie.



0:00 – Intro6:38 – Brain organoids18:48 – Glial cell plasticity24:50 – Whole brain emulation35:28 – Industry vs. academia45:32 – Intro to book: How To Motivate Your Students To Love Learning48:29 – Steve’s childhood influences57:21 – Developing one’s own intrinsic motivation1:02:30 – Real-world assignments1:08:00 – Keys to motivation1:11:50 – Peer pressure1:21:16 – Autonomy1:25:38 – Wikipedia real-world assignment1:33:12 – Relation to running a lab
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 101 Steve Potter: Motivating Brains In and Out of Dishes]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/04/art-potter-01.jpg" alt="" class="wp-image-1207" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="203" height="203" src="https://braininspired.co/wp-content/uploads/2018/07/steve_potter.jpeg" alt="" class="wp-image-137" /></div>



<p>Steve and I discuss his book, <a href="https://www.amazon.com/gp/product/1838172807/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1838172807&amp;linkId=7f2ad551e783d3a00686770ee61da142">How to Motivate Your Students to Love Learning</a>, which is both a memoir and a guide for teachers and students to optimize the learning experience for intrinsic motivation. Steve taught neuroscience and engineering courses while running his own lab studying the activity of live cultured neural populations (which we discuss at length <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">in his previous episode</a>). He relentlessly tested and tweaked his teaching methods, including constant feedback from the students, to optimize their learning experiences. He settled on real-world, project-based learning approaches, like writing wikipedia articles and helping groups of students design and carry out their own experiments. We discuss that, plus the science behind learning, principles important for motivating students and maintaining that motivation, and many of the other valuable insights he shares in the book.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2021/04/bookcover.jpg" alt="" class="wp-image-1208" width="167" height="250" /></div>



<p><br />The first half of the episode we discuss diverse neuroscience and AI topics, like brain organoids, mind-uploading, synaptic plasticity, and more. Then we discuss many of the stories and lessons from his book, which I recommend for teachers, mentors, and life-long students who want to ensure they’re optimizing their own  learning.</p>



<ul><li><a href="http://potterlab.gatech.edu/">Potter Lab</a>.</li><li>Twitter: <a href="https://twitter.com/stevempotter">@stevempotter</a>.</li><li>The Book: <a href="https://www.amazon.com/gp/product/1838172807/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1838172807&amp;linkId=7f2ad551e783d3a00686770ee61da142">How to Motivate Your Students to Love Learning.</a></li><li><a href="https://drive.google.com/file/d/1AN1XASJkfNDTbVvIeP8hbO-AnWPEwtKN/view?usp=sharing">The glial cell activity movie</a>.</li></ul>



<p>0:00 – Intro<br />6:38 – Brain organoids<br />18:48 – Glial cell plasticity<br />24:50 – Whole brain emulation<br />35:28 – Industry vs. academia<br />45:32 – Intro to book: How To Motivate Your Students To Love Learning<br />48:29 – Steve’s childhood influences<br />57:21 – Developing one’s own intrinsic motivation<br />1:02:30 – Real-world assignments<br />1:08:00 – Keys to motivation<br />1:11:50 – Peer pressure<br />1:21:16 – Autonomy<br />1:25:38 – Wikipedia real-world assignment<br />1:33:12 – Relation to running a lab</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/101-Steve-Potter-public.mp3" length="101459593"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Steve and I discuss his book, How to Motivate Your Students to Love Learning, which is both a memoir and a guide for teachers and students to optimize the learning experience for intrinsic motivation. Steve taught neuroscience and engineering courses while running his own lab studying the activity of live cultured neural populations (which we discuss at length in his previous episode). He relentlessly tested and tweaked his teaching methods, including constant feedback from the students, to optimize their learning experiences. He settled on real-world, project-based learning approaches, like writing wikipedia articles and helping groups of students design and carry out their own experiments. We discuss that, plus the science behind learning, principles important for motivating students and maintaining that motivation, and many of the other valuable insights he shares in the book.







The first half of the episode we discuss diverse neuroscience and AI topics, like brain organoids, mind-uploading, synaptic plasticity, and more. Then we discuss many of the stories and lessons from his book, which I recommend for teachers, mentors, and life-long students who want to ensure they’re optimizing their own  learning.



Potter Lab.Twitter: @stevempotter.The Book: How to Motivate Your Students to Love Learning.The glial cell activity movie.



0:00 – Intro6:38 – Brain organoids18:48 – Glial cell plasticity24:50 – Whole brain emulation35:28 – Industry vs. academia45:32 – Intro to book: How To Motivate Your Students To Love Learning48:29 – Steve’s childhood influences57:21 – Developing one’s own intrinsic motivation1:02:30 – Real-world assignments1:08:00 – Keys to motivation1:11:50 – Peer pressure1:21:16 – Autonomy1:25:38 – Wikipedia real-world assignment1:33:12 – Relation to running a lab
]]>
                </itunes:summary>
                                                                            <itunes:duration>01:45:22</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 100.6 Special: Do We Have the Right Vocabulary and Concepts?]]>
                </title>
                <pubDate>Sun, 28 Mar 2021 15:04:17 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-1006-special-do-we-have-the-right-vocabulary-and-concepts</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-1006-special-do-we-have-the-right-vocabulary-and-concepts</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-6-01.jpg" alt="" class="wp-image-1188" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>We made it to the last bit of our 100th episode celebration. These have been super fun for me, and I hope you’ve enjoyed the collections as well. If you’re wondering where the missing 5th part is, I reserved it exclusively for Brain Inspired’s magnificent <a href="https://www.patreon.com/braininspired">Patreon supporters</a> (thanks guys!!!!). The final question I sent to previous guests:</p>



<p><strong>Do we already have the right vocabulary and concepts to explain how brains and minds are related? Why or why not?</strong></p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />5:04 – <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a><br />7:04 – <a href="https://braininspired.co/podcast/55/">Thomas Naselaris</a><br />7:46 – <a href="https://braininspired.co/podcast/77/">John Krakauer</a><br />9:03 – <a href="https://braininspired.co/podcast/33/">Federico Turkheimer</a><br />11:57 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">Steve Potter</a><br />13:31 – <a href="https://braininspired.co/podcast/77/">David Krakauer</a><br />17:22 – <a href="https://braininspired.co/podcast/18/">Dean Buonomano</a><br />20:28 – <a href="https://braininspired.co/podcast/27/">Konrad Kording</a><br />22:00 – <a href="https://braininspired.co/podcast/63/">Uri Hasson</a><br />23:15 – <a href="https://braininspired.co/podcast/68/">Rodrigo Quian Quiroga</a><br />24:41 –<a href="https://braininspired.co/podcast/75/"> Jim DiCarlo</a><br />25:26 – <a href="https://braininspired.co/podcast/23/">Marcel van Gerven</a><br />28:02 – <a href="https://braininspired.co/podcast/72/">Mazviita Chirimuuta</a><br />29:27 – <a href="https://braininspired.co/podcast/70/">Brad Love</a><br />31:23 – <a href="https://braininspired.co/podcast/71/">Patrick Mayo</a><br />32:30 – <a href="https://braininspired.co/podcast/84/">György Buzsáki</a><br />37:07 – <a href="https://braininspired.co/podcast/81/">Pieter Roelfsema</a><br />37:26 – <a href="https://braininspired.co/podcast/84/">David Poeppel</a><br />40:22 – <a href="https://braininspired.co/podcast/66/">Paul Cisek</a><br />44:52 – <a href="https://braininspired.co/podcast/44/">Talia Konkle</a><br />47:03 – <a href="https://braininspired.co/podcast/82/">Steve Grossberg</a><br /></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








We made it to the last bit of our 100th episode celebration. These have been super fun for me, and I hope you’ve enjoyed the collections as well. If you’re wondering where the missing 5th part is, I reserved it exclusively for Brain Inspired’s magnificent Patreon supporters (thanks guys!!!!). The final question I sent to previous guests:



Do we already have the right vocabulary and concepts to explain how brains and minds are related? Why or why not?



Timestamps:



0:00 – Intro5:04 – Andrew Saxe7:04 – Thomas Naselaris7:46 – John Krakauer9:03 – Federico Turkheimer11:57 – Steve Potter13:31 – David Krakauer17:22 – Dean Buonomano20:28 – Konrad Kording22:00 – Uri Hasson23:15 – Rodrigo Quian Quiroga24:41 – Jim DiCarlo25:26 – Marcel van Gerven28:02 – Mazviita Chirimuuta29:27 – Brad Love31:23 – Patrick Mayo32:30 – György Buzsáki37:07 – Pieter Roelfsema37:26 – David Poeppel40:22 – Paul Cisek44:52 – Talia Konkle47:03 – Steve Grossberg
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 100.6 Special: Do We Have the Right Vocabulary and Concepts?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-6-01.jpg" alt="" class="wp-image-1188" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>We made it to the last bit of our 100th episode celebration. These have been super fun for me, and I hope you’ve enjoyed the collections as well. If you’re wondering where the missing 5th part is, I reserved it exclusively for Brain Inspired’s magnificent <a href="https://www.patreon.com/braininspired">Patreon supporters</a> (thanks guys!!!!). The final question I sent to previous guests:</p>



<p><strong>Do we already have the right vocabulary and concepts to explain how brains and minds are related? Why or why not?</strong></p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />5:04 – <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a><br />7:04 – <a href="https://braininspired.co/podcast/55/">Thomas Naselaris</a><br />7:46 – <a href="https://braininspired.co/podcast/77/">John Krakauer</a><br />9:03 – <a href="https://braininspired.co/podcast/33/">Federico Turkheimer</a><br />11:57 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">Steve Potter</a><br />13:31 – <a href="https://braininspired.co/podcast/77/">David Krakauer</a><br />17:22 – <a href="https://braininspired.co/podcast/18/">Dean Buonomano</a><br />20:28 – <a href="https://braininspired.co/podcast/27/">Konrad Kording</a><br />22:00 – <a href="https://braininspired.co/podcast/63/">Uri Hasson</a><br />23:15 – <a href="https://braininspired.co/podcast/68/">Rodrigo Quian Quiroga</a><br />24:41 –<a href="https://braininspired.co/podcast/75/"> Jim DiCarlo</a><br />25:26 – <a href="https://braininspired.co/podcast/23/">Marcel van Gerven</a><br />28:02 – <a href="https://braininspired.co/podcast/72/">Mazviita Chirimuuta</a><br />29:27 – <a href="https://braininspired.co/podcast/70/">Brad Love</a><br />31:23 – <a href="https://braininspired.co/podcast/71/">Patrick Mayo</a><br />32:30 – <a href="https://braininspired.co/podcast/84/">György Buzsáki</a><br />37:07 – <a href="https://braininspired.co/podcast/81/">Pieter Roelfsema</a><br />37:26 – <a href="https://braininspired.co/podcast/84/">David Poeppel</a><br />40:22 – <a href="https://braininspired.co/podcast/66/">Paul Cisek</a><br />44:52 – <a href="https://braininspired.co/podcast/44/">Talia Konkle</a><br />47:03 – <a href="https://braininspired.co/podcast/82/">Steve Grossberg</a><br /></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/06-Vocabulary-and-Concepts.mp3" length="48346621"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








We made it to the last bit of our 100th episode celebration. These have been super fun for me, and I hope you’ve enjoyed the collections as well. If you’re wondering where the missing 5th part is, I reserved it exclusively for Brain Inspired’s magnificent Patreon supporters (thanks guys!!!!). The final question I sent to previous guests:



Do we already have the right vocabulary and concepts to explain how brains and minds are related? Why or why not?



Timestamps:



0:00 – Intro5:04 – Andrew Saxe7:04 – Thomas Naselaris7:46 – John Krakauer9:03 – Federico Turkheimer11:57 – Steve Potter13:31 – David Krakauer17:22 – Dean Buonomano20:28 – Konrad Kording22:00 – Uri Hasson23:15 – Rodrigo Quian Quiroga24:41 – Jim DiCarlo25:26 – Marcel van Gerven28:02 – Mazviita Chirimuuta29:27 – Brad Love31:23 – Patrick Mayo32:30 – György Buzsáki37:07 – Pieter Roelfsema37:26 – David Poeppel40:22 – Paul Cisek44:52 – Talia Konkle47:03 – Steve Grossberg
]]>
                </itunes:summary>
                                                                            <itunes:duration>00:50:03</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 100.4 Special: What Ideas Are Holding Us Back?]]>
                </title>
                <pubDate>Sun, 21 Mar 2021 10:29:11 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-1004-what-ideas-are-holding-us-back</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-1004-what-ideas-are-holding-us-back</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-4-01.jpg" alt="" class="wp-image-1186" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>In the 4th installment of our 100th episode celebration, previous guests responded to the question:</p>



<p><strong>What ideas, assumptions, or terms do you think is holding back neuroscience/AI, and why?</strong></p>



<p>As usual, the responses are varied and wonderful!</p>



<p></p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />6:41 – <a href="https://braininspired.co/podcast/81/">Pieter Roelfsema</a><br />7:52 – <a href="https://braininspired.co/podcast/11/">Grace Lindsay</a><br />10:23 – <a href="https://braininspired.co/podcast/23/">Marcel van Gerven</a><br />11:38 – <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a><br />14:05 – <a href="https://braininspired.co/podcast/83/">Jane Wang</a><br />16:50 – <a href="https://braininspired.co/podcast/55/">Thomas Naselaris</a><br />18:14 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">Steve Potter</a><br />19:18 – <a href="https://braininspired.co/podcast/26/">Kendrick Kay</a><br />22:17 – <a href="https://braininspired.co/podcast/9/">Blake Richards</a><br />27:52 –<a href="https://braininspired.co/podcast/30/"> Jay McClelland</a><br />30:13 – <a href="https://braininspired.co/podcast/75/">Jim DiCarlo</a><br />31:17 – <a href="https://braininspired.co/podcast/44/">Talia Konkle</a><br />33:27 – <a href="https://braininspired.co/podcast/63/">Uri Hasson</a><br />35:37 – <a href="https://braininspired.co/podcast/58/">Wolfgang Maass<br /></a>38:48 – <a href="https://braininspired.co/podcast/66/">Paul Cisek</a><br />40:41 – <a href="https://braininspired.co/podcast/71/">Patrick Mayo</a><br />41:51 – <a href="https://braininspired.co/podcast/27/">Konrad Kording</a><br />43:22 – <a href="https://braininspired.co/podcast/84/">David Poeppel</a><br />44:22 – <a href="https://braininspired.co/podcast/70/">Brad Love</a><br />46:47 – <a href="https://braininspired.co/podcast/68/">Rodrigo Quian Quiroga</a><br />47:36 – <a href="https://braininspired.co/podcast/82/">Steve Grossberg</a><br />48:47 – <a href="https://braininspired.co/podcast/bi-004-mark-humphries-learning-to-remember/">Mark Humphries</a><br />52:35 – <a href="https://braininspired.co/podcast/77/">John Krakauer</a><br />55:13 – <a href="https://braininspired.co/podcast/84/">György Buzsáki</a><br />59:50 – <a 
href="https://braininspired.co/podcast/62/">Stefan Leijnan</a><br />1:02:18 – <a href="https://braininspired.co/podcast/37/">Nathaniel Daw</a></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








In the 4th installment of our 100th episode celebration, previous guests responded to the question:



What ideas, assumptions, or terms do you think is holding back neuroscience/AI, and why?



As usual, the responses are varied and wonderful!







Timestamps:



0:00 – Intro6:41 – Pieter Roelfsema7:52 – Grace Lindsay10:23 – Marcel van Gerven11:38 – Andrew Saxe14:05 – Jane Wang16:50 – Thomas Naselaris18:14 – Steve Potter19:18 – Kendrick Kay22:17 – Blake Richards27:52 – Jay McClelland30:13 – Jim DiCarlo31:17 – Talia Konkle33:27 – Uri Hasson35:37 – Wolfgang Maass38:48 – Paul Cisek40:41 – Patrick Mayo41:51 – Konrad Kording43:22 – David Poeppel44:22 – Brad Love46:47 – Rodrigo Quian Quiroga47:36 – Steve Grossberg48:47 – Mark Humphries52:35 – John Krakauer55:13 – György Buzsáki59:50 – Stefan Leijnan1:02:18 – Nathaniel Daw
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 100.4 Special: What Ideas Are Holding Us Back?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-4-01.jpg" alt="" class="wp-image-1186" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>In the 4th installment of our 100th episode celebration, previous guests responded to the question:</p>



<p><strong>What ideas, assumptions, or terms do you think is holding back neuroscience/AI, and why?</strong></p>



<p>As usual, the responses are varied and wonderful!</p>



<p></p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />6:41 – <a href="https://braininspired.co/podcast/81/">Pieter Roelfsema</a><br />7:52 – <a href="https://braininspired.co/podcast/11/">Grace Lindsay</a><br />10:23 – <a href="https://braininspired.co/podcast/23/">Marcel van Gerven</a><br />11:38 – <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a><br />14:05 – <a href="https://braininspired.co/podcast/83/">Jane Wang</a><br />16:50 – <a href="https://braininspired.co/podcast/55/">Thomas Naselaris</a><br />18:14 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">Steve Potter</a><br />19:18 – <a href="https://braininspired.co/podcast/26/">Kendrick Kay</a><br />22:17 – <a href="https://braininspired.co/podcast/9/">Blake Richards</a><br />27:52 –<a href="https://braininspired.co/podcast/30/"> Jay McClelland</a><br />30:13 – <a href="https://braininspired.co/podcast/75/">Jim DiCarlo</a><br />31:17 – <a href="https://braininspired.co/podcast/44/">Talia Konkle</a><br />33:27 – <a href="https://braininspired.co/podcast/63/">Uri Hasson</a><br />35:37 – <a href="https://braininspired.co/podcast/58/">Wolfgang Maass<br /></a>38:48 – <a href="https://braininspired.co/podcast/66/">Paul Cisek</a><br />40:41 – <a href="https://braininspired.co/podcast/71/">Patrick Mayo</a><br />41:51 – <a href="https://braininspired.co/podcast/27/">Konrad Kording</a><br />43:22 – <a href="https://braininspired.co/podcast/84/">David Poeppel</a><br />44:22 – <a href="https://braininspired.co/podcast/70/">Brad Love</a><br />46:47 – <a href="https://braininspired.co/podcast/68/">Rodrigo Quian Quiroga</a><br />47:36 – <a href="https://braininspired.co/podcast/82/">Steve Grossberg</a><br />48:47 – <a href="https://braininspired.co/podcast/bi-004-mark-humphries-learning-to-remember/">Mark Humphries</a><br />52:35 – <a href="https://braininspired.co/podcast/77/">John Krakauer</a><br />55:13 – <a href="https://braininspired.co/podcast/84/">György Buzsáki</a><br />59:50 – <a 
href="https://braininspired.co/podcast/62/">Stefan Leijnan</a><br />1:02:18 – <a href="https://braininspired.co/podcast/37/">Nathaniel Daw</a></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/04-Assumptions-Holding-Back.mp3" length="62161652"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








In the 4th installment of our 100th episode celebration, previous guests responded to the question:



What ideas, assumptions, or terms do you think is holding back neuroscience/AI, and why?



As usual, the responses are varied and wonderful!







Timestamps:



0:00 – Intro6:41 – Pieter Roelfsema7:52 – Grace Lindsay10:23 – Marcel van Gerven11:38 – Andrew Saxe14:05 – Jane Wang16:50 – Thomas Naselaris18:14 – Steve Potter19:18 – Kendrick Kay22:17 – Blake Richards27:52 – Jay McClelland30:13 – Jim DiCarlo31:17 – Talia Konkle33:27 – Uri Hasson35:37 – Wolfgang Maass38:48 – Paul Cisek40:41 – Patrick Mayo41:51 – Konrad Kording43:22 – David Poeppel44:22 – Brad Love46:47 – Rodrigo Quian Quiroga47:36 – Steve Grossberg48:47 – Mark Humphries52:35 – John Krakauer55:13 – György Buzsáki59:50 – Stefan Leijnan1:02:18 – Nathaniel Daw
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-100-4-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:04:26</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 100.3 Special: Can We Scale Up to AGI with Current Tech?]]>
                </title>
                <pubDate>Wed, 17 Mar 2021 08:03:02 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-1003-can-we-scale-up-to-agi-with-current-tech</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-1003-can-we-scale-up-to-agi-with-current-tech</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-3-01.jpg" alt="" class="wp-image-1184" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Part 3 in our 100th episode celebration. Previous guests answered the question: </p>



<p><strong>Given the continual surprising progress in AI powered by scaling up parameters and using more compute, while using fairly generic architectures (eg. GPT-3):</strong></p>



<p><strong>Do you think the current trend of scaling compute can lead to human level AGI?</strong> <strong>If not, what’s missing?</strong></p>



<p>It likely won’t surprise you that the vast majority answer “No.” It also likely won’t surprise you, there is differing opinion on what’s missing.</p>



<p></p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />3:56 – <a href="https://braininspired.co/podcast/58/">Wolfgang Maass<br /></a>5:34 – <a href="https://braininspired.co/podcast/29/">Paul Humphreys</a><br />9:16 – <a href="https://braininspired.co/podcast/90/">Chris Eliasmith</a><br />12:52 – <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a><br />16:25 – <a href="https://braininspired.co/podcast/72/">Mazviita Chirimuuta</a><br />18:11 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">Steve Potter</a><br />19:21 – <a href="https://braininspired.co/podcast/9/">Blake Richards</a><br />22:33 – <a href="https://braininspired.co/podcast/66/">Paul Cisek</a><br />26:24 – <a href="https://braininspired.co/podcast/70/">Brad Love</a><br />29:12 –<a href="https://braininspired.co/podcast/30/"> Jay McClelland</a><br />34:20 – <a href="https://braininspired.co/podcast/73/">Megan Peters</a><br />37:00 – <a href="https://braininspired.co/podcast/18/">Dean Buonomano</a><br />39:48 – <a href="https://braininspired.co/podcast/44/">Talia Konkle</a><br />40:36 – <a href="https://braininspired.co/podcast/82/">Steve Grossberg</a><br />42:40 – <a href="https://braininspired.co/podcast/37/">Nathaniel Daw</a><br />44:02 – <a href="https://braininspired.co/podcast/23/">Marcel van Gerven</a><br />45:28 – <a href="https://braininspired.co/podcast/54/">Kanaka Rajan</a><br />48:25 – <a href="https://braininspired.co/podcast/77/">John Krakauer</a><br />51:05 – <a href="https://braininspired.co/podcast/68/">Rodrigo Quian Quiroga</a><br />53:03 – <a href="https://braininspired.co/podcast/11/">Grace Lindsay</a><br />55:13 – <a href="https://braininspired.co/podcast/27/">Konrad Kording</a><br />57:30 – <a href="https://braininspired.co/podcast/17/">Jeff Hawkins</a><br />1:02:12 – <a href="https://braininspired.co/podcast/63/">Uri Hasson</a><br />1:04:08 – <a href="https://braininspired.co/podcast/51/">Jess Hamrick</a><br />1:06:20 – <a href="https://braininspired.co/podcast/55/">Thomas 
Naselaris</a></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Part 3 in our 100th episode celebration. Previous guests answered the question: 



Given the continual surprising progress in AI powered by scaling up parameters and using more compute, while using fairly generic architectures (eg. GPT-3):



Do you think the current trend of scaling compute can lead to human level AGI? If not, what’s missing?



It likely won’t surprise you that the vast majority answer “No.” It also likely won’t surprise you, there is differing opinion on what’s missing.







Timestamps:



0:00 – Intro3:56 – Wolfgang Maass5:34 – Paul Humphreys9:16 – Chris Eliasmith12:52 – Andrew Saxe16:25 – Mazviita Chirimuuta18:11 – Steve Potter19:21 – Blake Richards22:33 – Paul Cisek26:24 – Brad Love29:12 – Jay McClelland34:20 – Megan Peters37:00 – Dean Buonomano39:48 – Talia Konkle40:36 – Steve Grossberg42:40 – Nathaniel Daw44:02 – Marcel van Gerven45:28 – Kanaka Rajan48:25 – John Krakauer51:05 – Rodrigo Quian Quiroga53:03 – Grace Lindsay55:13 – Konrad Kording57:30 – Jeff Hawkins1:02:12 – Uri Hasson1:04:08 – Jess Hamrick1:06:20 – Thomas Naselaris
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 100.3 Special: Can We Scale Up to AGI with Current Tech?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-3-01.jpg" alt="" class="wp-image-1184" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Part 3 in our 100th episode celebration. Previous guests answered the question: </p>



<p><strong>Given the continual surprising progress in AI powered by scaling up parameters and using more compute, while using fairly generic architectures (eg. GPT-3):</strong></p>



<p><strong>Do you think the current trend of scaling compute can lead to human level AGI?</strong> <strong>If not, what’s missing?</strong></p>



<p>It likely won’t surprise you that the vast majority answer “No.” It also likely won’t surprise you, there is differing opinion on what’s missing.</p>



<p></p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />3:56 – <a href="https://braininspired.co/podcast/58/">Wolfgang Maass<br /></a>5:34 – <a href="https://braininspired.co/podcast/29/">Paul Humphreys</a><br />9:16 – <a href="https://braininspired.co/podcast/90/">Chris Eliasmith</a><br />12:52 – <a href="https://braininspired.co/podcast/52/">Andrew Saxe</a><br />16:25 – <a href="https://braininspired.co/podcast/72/">Mazviita Chirimuuta</a><br />18:11 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/">Steve Potter</a><br />19:21 – <a href="https://braininspired.co/podcast/9/">Blake Richards</a><br />22:33 – <a href="https://braininspired.co/podcast/66/">Paul Cisek</a><br />26:24 – <a href="https://braininspired.co/podcast/70/">Brad Love</a><br />29:12 –<a href="https://braininspired.co/podcast/30/"> Jay McClelland</a><br />34:20 – <a href="https://braininspired.co/podcast/73/">Megan Peters</a><br />37:00 – <a href="https://braininspired.co/podcast/18/">Dean Buonomano</a><br />39:48 – <a href="https://braininspired.co/podcast/44/">Talia Konkle</a><br />40:36 – <a href="https://braininspired.co/podcast/82/">Steve Grossberg</a><br />42:40 – <a href="https://braininspired.co/podcast/37/">Nathaniel Daw</a><br />44:02 – <a href="https://braininspired.co/podcast/23/">Marcel van Gerven</a><br />45:28 – <a href="https://braininspired.co/podcast/54/">Kanaka Rajan</a><br />48:25 – <a href="https://braininspired.co/podcast/77/">John Krakauer</a><br />51:05 – <a href="https://braininspired.co/podcast/68/">Rodrigo Quian Quiroga</a><br />53:03 – <a href="https://braininspired.co/podcast/11/">Grace Lindsay</a><br />55:13 – <a href="https://braininspired.co/podcast/27/">Konrad Kording</a><br />57:30 – <a href="https://braininspired.co/podcast/17/">Jeff Hawkins</a><br />1:02:12 – <a href="https://braininspired.co/podcast/63/">Uri Hasson</a><br />1:04:08 – <a href="https://braininspired.co/podcast/51/">Jess Hamrick</a><br />1:06:20 – <a href="https://braininspired.co/podcast/55/">Thomas 
Naselaris</a></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/03-Scale-Up.mp3" length="66265848"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Part 3 in our 100th episode celebration. Previous guests answered the question: 



Given the continual surprising progress in AI powered by scaling up parameters and using more compute, while using fairly generic architectures (eg. GPT-3):



Do you think the current trend of scaling compute can lead to human level AGI? If not, what’s missing?



It likely won’t surprise you that the vast majority answer “No.” It also likely won’t surprise you, there is differing opinion on what’s missing.







Timestamps:



0:00 – Intro3:56 – Wolfgang Maass5:34 – Paul Humphreys9:16 – Chris Eliasmith12:52 – Andrew Saxe16:25 – Mazviita Chirimuuta18:11 – Steve Potter19:21 – Blake Richards22:33 – Paul Cisek26:24 – Brad Love29:12 – Jay McClelland34:20 – Megan Peters37:00 – Dean Buonomano39:48 – Talia Konkle40:36 – Steve Grossberg42:40 – Nathaniel Daw44:02 – Marcel van Gerven45:28 – Kanaka Rajan48:25 – John Krakauer51:05 – Rodrigo Quian Quiroga53:03 – Grace Lindsay55:13 – Konrad Kording57:30 – Jeff Hawkins1:02:12 – Uri Hasson1:04:08 – Jess Hamrick1:06:20 – Thomas Naselaris
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-100-3-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:08:43</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 100.2 Special: What Are the Biggest Challenges and Disagreements?]]>
                </title>
                <pubDate>Fri, 12 Mar 2021 08:24:11 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-1002-what-are-the-biggest-challenges-and-disagreements</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-1002-what-are-the-biggest-challenges-and-disagreements</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-2-01.jpg" alt="" class="wp-image-1182" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>In this 2nd special 100th episode installment, many previous guests answer the question: What is currently the most important disagreement or challenge in neuroscience and/or AI, and what do you think the right answer or direction is? The variety of answers is itself revealing, and highlights how many interesting problems there are to work on.</p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />7:10 – <a href="https://braininspired.co/podcast/68/" target="_blank" rel="noreferrer noopener">Rodrigo Quian Quiroga</a><br />8:33 – <a href="https://braininspired.co/podcast/72/" target="_blank" rel="noreferrer noopener">Mazviita Chirimuuta</a><br />9:15 – <a href="https://braininspired.co/podcast/90/" target="_blank" rel="noreferrer noopener">Chris Eliasmith</a><br />12:50 – <a href="https://braininspired.co/podcast/75/" target="_blank" rel="noreferrer noopener">Jim DiCarlo</a><br />13:23 – <a href="https://braininspired.co/podcast/66/" target="_blank" rel="noreferrer noopener">Paul Cisek</a><br />16:42 – <a href="https://braininspired.co/podcast/37/" target="_blank" rel="noreferrer noopener">Nathaniel Daw</a><br />17:58 – <a href="https://braininspired.co/podcast/51/" target="_blank" rel="noreferrer noopener">Jessica Hamrick</a><br />19:07 – <a href="https://braininspired.co/podcast/92/" target="_blank" rel="noreferrer noopener">Russ Poldrack</a><br />20:47 – <a href="https://braininspired.co/podcast/81/" target="_blank" rel="noreferrer noopener">Pieter Roelfsema</a><br />22:21 – <a href="https://braininspired.co/podcast/27/" target="_blank" rel="noreferrer noopener">Konrad Kording</a><br />25:16 – <a href="https://braininspired.co/podcast/89/" target="_blank" rel="noreferrer noopener">Matt Smith</a><br />27:55 – <a href="https://braininspired.co/podcast/32/" target="_blank" rel="noreferrer noopener">Rafal Bogacz</a><br />29:17 –<a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener"> John Krakauer</a><br />30:47 – <a href="https://braininspired.co/podcast/23/" target="_blank" rel="noreferrer noopener">Marcel van Gerven</a><br />31:49 – <a href="https://braininspired.co/podcast/23/" target="_blank" rel="noreferrer noopener">György Buzsáki</a><br />35:38 – <a href="https://braininspired.co/podcast/55/" target="_blank" rel="noreferrer noopener">Thomas Naselaris</a><br />36:55 – <a 
href="https://braininspired.co/podcast/82/" target="_blank" rel="noreferrer noopener">Steve Grossberg</a><br />48:32 – <a href="https://braininspired.co/podcast/84/" target="_blank" rel="noreferrer noopener">David Poeppel</a><br />49:24 – <a href="https://braininspired.co/podcast/71/" target="_blank" rel="noreferrer noopener">Patrick Mayo</a><br />50:31 – <a href="https://braininspired.co/podcast/62/" target="_blank" rel="noreferrer noopener">Stefan Leijnen</a><br />54:24 – <a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener">David Krakauer</a><br />58:13 – <a href="https://braininspired.co/podcast/58/" target="_blank" rel="noreferrer noopener">Wolfgang Maass</a><br />59:13 – <a href="https://braininspired.co/podcast/63/" target="_blank" rel="noreferrer noopener">Uri Hasson</a><br />59:50 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/" target="_blank" rel="noreferrer noopener">Steve Potter</a><br />1:01:50 – <a href="https://braininspired.co/podcast/44/" target="_blank" rel="noreferrer noopener">Talia Konkle</a><br />1:04:30 – <a href="https://braininspired.co/podcast/21/" target="_blank" rel="noreferrer noopener">Matt Botvinick</a><br />1:06:36 – <a href="https://braininspired.co/podcast/70/" target="_blank" rel="noreferrer noopener">Brad Love</a></p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








In this 2nd special 100th episode installment, many previous guests answer the question: What is currently the most important disagreement or challenge in neuroscience and/or AI, and what do you think the right answer or direction is? The variety of answers is itself revealing, and highlights how many interesting problems there are to work on.



Timestamps:



0:00 – Intro7:10 – Rodrigo Quian Quiroga8:33 – Mazviita Chirimuuta9:15 – Chris Eliasmith12:50 – Jim DiCarlo13:23 – Paul Cisek16:42 – Nathaniel Daw17:58 – Jessica Hamrick19:07 – Russ Poldrack20:47 – Pieter Roelfsema22:21 – Konrad Kording25:16 – Matt Smith27:55 – Rafal Bogacz29:17 – John Krakauer30:47 – Marcel van Gerven31:49 – György Buzsáki35:38 – Thomas Naselaris36:55 – Steve Grossberg48:32 – David Poeppel49:24 – Patrick Mayo50:31 – Stefan Leijnen54:24 – David Krakauer58:13 – Wolfgang Maass59:13 – Uri Hasson59:50 – Steve Potter1:01:50 – Talia Konkle1:04:30 – Matt Botvinick1:06:36 – Brad Love]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 100.2 Special: What Are the Biggest Challenges and Disagreements?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-2-01.jpg" alt="" class="wp-image-1182" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>In this 2nd special 100th episode installment, many previous guests answer the question: What is currently the most important disagreement or challenge in neuroscience and/or AI, and what do you think the right answer or direction is? The variety of answers is itself revealing, and highlights how many interesting problems there are to work on.</p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />7:10 – <a href="https://braininspired.co/podcast/68/" target="_blank" rel="noreferrer noopener">Rodrigo Quian Quiroga</a><br />8:33 – <a href="https://braininspired.co/podcast/72/" target="_blank" rel="noreferrer noopener">Mazviita Chirimuuta</a><br />9:15 – <a href="https://braininspired.co/podcast/90/" target="_blank" rel="noreferrer noopener">Chris Eliasmith</a><br />12:50 – <a href="https://braininspired.co/podcast/75/" target="_blank" rel="noreferrer noopener">Jim DiCarlo</a><br />13:23 – <a href="https://braininspired.co/podcast/66/" target="_blank" rel="noreferrer noopener">Paul Cisek</a><br />16:42 – <a href="https://braininspired.co/podcast/37/" target="_blank" rel="noreferrer noopener">Nathaniel Daw</a><br />17:58 – <a href="https://braininspired.co/podcast/51/" target="_blank" rel="noreferrer noopener">Jessica Hamrick</a><br />19:07 – <a href="https://braininspired.co/podcast/92/" target="_blank" rel="noreferrer noopener">Russ Poldrack</a><br />20:47 – <a href="https://braininspired.co/podcast/81/" target="_blank" rel="noreferrer noopener">Pieter Roelfsema</a><br />22:21 – <a href="https://braininspired.co/podcast/27/" target="_blank" rel="noreferrer noopener">Konrad Kording</a><br />25:16 – <a href="https://braininspired.co/podcast/89/" target="_blank" rel="noreferrer noopener">Matt Smith</a><br />27:55 – <a href="https://braininspired.co/podcast/32/" target="_blank" rel="noreferrer noopener">Rafal Bogacz</a><br />29:17 –<a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener"> John Krakauer</a><br />30:47 – <a href="https://braininspired.co/podcast/23/" target="_blank" rel="noreferrer noopener">Marcel van Gerven</a><br />31:49 – <a href="https://braininspired.co/podcast/23/" target="_blank" rel="noreferrer noopener">György Buzsáki</a><br />35:38 – <a href="https://braininspired.co/podcast/55/" target="_blank" rel="noreferrer noopener">Thomas Naselaris</a><br />36:55 – <a 
href="https://braininspired.co/podcast/82/" target="_blank" rel="noreferrer noopener">Steve Grossberg</a><br />48:32 – <a href="https://braininspired.co/podcast/84/" target="_blank" rel="noreferrer noopener">David Poeppel</a><br />49:24 – <a href="https://braininspired.co/podcast/71/" target="_blank" rel="noreferrer noopener">Patrick Mayo</a><br />50:31 – <a href="https://braininspired.co/podcast/62/" target="_blank" rel="noreferrer noopener">Stefan Leijnen</a><br />54:24 – <a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener">David Krakauer</a><br />58:13 – <a href="https://braininspired.co/podcast/58/" target="_blank" rel="noreferrer noopener">Wolfgang Maass</a><br />59:13 – <a href="https://braininspired.co/podcast/63/" target="_blank" rel="noreferrer noopener">Uri Hasson</a><br />59:50 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/" target="_blank" rel="noreferrer noopener">Steve Potter</a><br />1:01:50 – <a href="https://braininspired.co/podcast/44/" target="_blank" rel="noreferrer noopener">Talia Konkle</a><br />1:04:30 – <a href="https://braininspired.co/podcast/21/" target="_blank" rel="noreferrer noopener">Matt Botvinick</a><br />1:06:36 – <a href="https://braininspired.co/podcast/70/" target="_blank" rel="noreferrer noopener">Brad Love</a><br />1:09:46 – <a href="https://braininspired.co/podcast/53/" target="_blank" rel="noreferrer noopener">Jon Brennan</a><br />1:19:31 – <a href="https://braininspired.co/podcast/11/" target="_blank" rel="noreferrer noopener">Grace Lindsay</a><br />1:22:28 – <a href="https://braininspired.co/podcast/52/" target="_blank" rel="noreferrer noopener">Andrew Saxe</a></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/02-Biggest-Challenge.mp3" length="81907837"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








In this 2nd special 100th episode installment, many previous guests answer the question: What is currently the most important disagreement or challenge in neuroscience and/or AI, and what do you think the right answer or direction is? The variety of answers is itself revealing, and highlights how many interesting problems there are to work on.



Timestamps:



0:00 – Intro7:10 – Rodrigo Quian Quiroga8:33 – Mazviita Chirimuuta9:15 – Chris Eliasmith12:50 – Jim DiCarlo13:23 – Paul Cisek16:42 – Nathaniel Daw17:58 – Jessica Hamrick19:07 – Russ Poldrack20:47 – Pieter Roelfsema22:21 – Konrad Kording25:16 – Matt Smith27:55 – Rafal Bogacz29:17 – John Krakauer30:47 – Marcel van Gerven31:49 – György Buzsáki35:38 – Thomas Naselaris36:55 – Steve Grossberg48:32 – David Poeppel49:24 – Patrick Mayo50:31 – Stefan Leijnen54:24 – David Krakauer58:13 – Wolfgang Maass59:13 – Uri Hasson59:50 – Steve Potter1:01:50 – Talia Konkle1:04:30 – Matt Botvinick1:06:36 – Brad Love]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-100-2-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:25:00</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 100.1 Special: What Has Improved Your Career or Well-being?]]>
                </title>
                <pubDate>Tue, 09 Mar 2021 15:18:23 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-1001-special-what-has-improved-your-career-or-well-being</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-1001-special-what-has-improved-your-career-or-well-being</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-1-01.jpg" alt="" class="wp-image-1180" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Brain Inspired turns 100 (episodes) today! To celebrate, my <a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener">patreon supporters</a> helped me create a list of questions to ask my previous guests, many of whom contributed by answering any or all of the questions. I’ve collected all their responses into separate little episodes, one for each question. Starting with a light-hearted (but quite valuable) one, this episode has responses to the question, “In the last five years, what new belief, behavior, or habit has most improved your career or well being?” See below for links to each previous guest. And away we go…</p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />6:13 – <a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener">David Krakauer</a><br />8:50 – <a href="https://braininspired.co/podcast/84/" target="_blank" rel="noreferrer noopener">David Poeppel</a><br />9:32 – <a href="https://braininspired.co/podcast/30/" target="_blank" rel="noreferrer noopener">Jay McClelland</a><br />11:03 – <a href="https://braininspired.co/podcast/71/" target="_blank" rel="noreferrer noopener">Patrick Mayo</a><br />11:45 – <a href="https://braininspired.co/podcast/23/" target="_blank" rel="noreferrer noopener">Marcel van Gerven</a><br />12:11 – <a href="https://braininspired.co/podcast/9/" target="_blank" rel="noreferrer noopener">Blake Richards</a><br />12:25 – <a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener">John Krakauer</a><br />14:22 – <a href="https://braininspired.co/podcast/57/" target="_blank" rel="noreferrer noopener">Nicole Rust</a><br />15:26 – <a href="https://braininspired.co/podcast/73/" target="_blank" rel="noreferrer noopener">Megan Peters</a><br />17:03 – <a href="https://braininspired.co/podcast/52/" target="_blank" rel="noreferrer noopener">Andrew Saxe</a><br />18:11 – <a href="https://braininspired.co/podcast/33/" target="_blank" rel="noreferrer noopener">Federico Turkheimer</a><br />20:03 – <a href="https://braininspired.co/podcast/68/" target="_blank" rel="noreferrer noopener">Rodrigo Quian Quiroga</a><br />22:03 – <a href="https://braininspired.co/podcast/55/" target="_blank" rel="noreferrer noopener">Thomas Naselaris</a><br />23:09 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/" target="_blank" rel="noreferrer noopener">Steve Potter</a><br />24:37 – <a href="https://braininspired.co/podcast/70/" target="_blank" rel="noreferrer noopener">Brad Love</a><br />27:18 – <a href="https://braininspired.co/podcast/82/" target="_blank" rel="noreferrer noopener">Steve Grossberg</a><br />29:04 – <a 
href="https://braininspired.co/podcast/44/" target="_blank" rel="noreferrer noopener">Talia Konkle</a><br />29:58 – <a href="https://braininspired.co/podcast/66/" target="_blank" rel="noreferrer noopener">Paul Cisek</a><br />32:28 – <a href="https://braininspired.co/podcast/54/" target="_blank" rel="noreferrer noopener">Kanaka Rajan</a><br />34:33 – <a href="https://braininspired.co/podcast/11/" target="_blank" rel="noreferrer noopener">Grace Lindsay</a><br />35:40 – <a href="https://braininspired.co/podcast/27/" target="_blank" rel="noreferrer noopener">Konrad Kording</a><br />36:30 – <a href="https://braininspired.co/podcast/bi-004-mark-humphries-learning-to-remember/" target="_blank" rel="noreferrer noopener">Mark Humphries</a></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Brain Inspired turns 100 (episodes) today! To celebrate, my patreon supporters helped me create a list of questions to ask my previous guests, many of whom contributed by answering any or all of the questions. I’ve collected all their responses into separate little episodes, one for each question. Starting with a light-hearted (but quite valuable) one, this episode has responses to the question, “In the last five years, what new belief, behavior, or habit has most improved your career or well being?” See below for links to each previous guest. And away we go…



Timestamps:



0:00 – Intro6:13 – David Krakauer8:50 – David Poeppel9:32 – Jay McClelland11:03 – Patrick Mayo11:45 – Marcel van Gerven12:11 – Blake Richards12:25 – John Krakauer14:22 – Nicole Rust15:26 – Megan Peters17:03 – Andrew Saxe18:11 – Federico Turkheimer20:03 – Rodrigo Quian Quiroga22:03 – Thomas Naselaris23:09 – Steve Potter24:37 – Brad Love27:18 – Steve Grossberg29:04 – Talia Konkle29:58 – Paul Cisek32:28 – Kanaka Rajan34:33 – Grace Lindsay35:40 – Konrad Kording36:30 – Mark Humphries
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 100.1 Special: What Has Improved Your Career or Well-being?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/03/art-100-1-01.jpg" alt="" class="wp-image-1180" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Brain Inspired turns 100 (episodes) today! To celebrate, my <a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener">patreon supporters</a> helped me create a list of questions to ask my previous guests, many of whom contributed by answering any or all of the questions. I’ve collected all their responses into separate little episodes, one for each question. Starting with a light-hearted (but quite valuable) one, this episode has responses to the question, “In the last five years, what new belief, behavior, or habit has most improved your career or well being?” See below for links to each previous guest. And away we go…</p>



<p>Timestamps:</p>



<p>0:00 – Intro<br />6:13 – <a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener">David Krakauer</a><br />8:50 – <a href="https://braininspired.co/podcast/84/" target="_blank" rel="noreferrer noopener">David Poeppel</a><br />9:32 – <a href="https://braininspired.co/podcast/30/" target="_blank" rel="noreferrer noopener">Jay McClelland</a><br />11:03 – <a href="https://braininspired.co/podcast/71/" target="_blank" rel="noreferrer noopener">Patrick Mayo</a><br />11:45 – <a href="https://braininspired.co/podcast/23/" target="_blank" rel="noreferrer noopener">Marcel van Gerven</a><br />12:11 – <a href="https://braininspired.co/podcast/9/" target="_blank" rel="noreferrer noopener">Blake Richards</a><br />12:25 – <a href="https://braininspired.co/podcast/77/" target="_blank" rel="noreferrer noopener">John Krakauer</a><br />14:22 – <a href="https://braininspired.co/podcast/57/" target="_blank" rel="noreferrer noopener">Nicole Rust</a><br />15:26 – <a href="https://braininspired.co/podcast/73/" target="_blank" rel="noreferrer noopener">Megan Peters</a><br />17:03 – <a href="https://braininspired.co/podcast/52/" target="_blank" rel="noreferrer noopener">Andrew Saxe</a><br />18:11 – <a href="https://braininspired.co/podcast/33/" target="_blank" rel="noreferrer noopener">Federico Turkheimer</a><br />20:03 – <a href="https://braininspired.co/podcast/68/" target="_blank" rel="noreferrer noopener">Rodrigo Quian Quiroga</a><br />22:03 – <a href="https://braininspired.co/podcast/55/" target="_blank" rel="noreferrer noopener">Thomas Naselaris</a><br />23:09 – <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/" target="_blank" rel="noreferrer noopener">Steve Potter</a><br />24:37 – <a href="https://braininspired.co/podcast/70/" target="_blank" rel="noreferrer noopener">Brad Love</a><br />27:18 – <a href="https://braininspired.co/podcast/82/" target="_blank" rel="noreferrer noopener">Steve Grossberg</a><br />29:04 – <a 
href="https://braininspired.co/podcast/44/" target="_blank" rel="noreferrer noopener">Talia Konkle</a><br />29:58 – <a href="https://braininspired.co/podcast/66/" target="_blank" rel="noreferrer noopener">Paul Cisek</a><br />32:28 – <a href="https://braininspired.co/podcast/54/" target="_blank" rel="noreferrer noopener">Kanaka Rajan</a><br />34:33 – <a href="https://braininspired.co/podcast/11/" target="_blank" rel="noreferrer noopener">Grace Lindsay</a><br />35:40 – <a href="https://braininspired.co/podcast/27/" target="_blank" rel="noreferrer noopener">Konrad Kording</a><br />36:30 – <a href="https://braininspired.co/podcast/bi-004-mark-humphries-learning-to-remember/" target="_blank" rel="noreferrer noopener">Mark Humphries</a></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/01-Last-5-years.mp3" length="41139882"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Brain Inspired turns 100 (episodes) today! To celebrate, my patreon supporters helped me create a list of questions to ask my previous guests, many of whom contributed by answering any or all of the questions. I’ve collected all their responses into separate little episodes, one for each question. Starting with a light-hearted (but quite valuable) one, this episode has responses to the question, “In the last five years, what new belief, behavior, or habit has most improved your career or well being?” See below for links to each previous guest. And away we go…



Timestamps:



0:00 – Intro6:13 – David Krakauer8:50 – David Poeppel9:32 – Jay McClelland11:03 – Patrick Mayo11:45 – Marcel van Gerven12:11 – Blake Richards12:25 – John Krakauer14:22 – Nicole Rust15:26 – Megan Peters17:03 – Andrew Saxe18:11 – Federico Turkheimer20:03 – Rodrigo Quian Quiroga22:03 – Thomas Naselaris23:09 – Steve Potter24:37 – Brad Love27:18 – Steve Grossberg29:04 – Talia Konkle29:58 – Paul Cisek32:28 – Kanaka Rajan34:33 – Grace Lindsay35:40 – Konrad Kording36:30 – Mark Humphries
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-100-1-01.jpg"></itunes:image>
                                                                            <itunes:duration>00:42:32</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness]]>
                </title>
                <pubDate>Sun, 28 Feb 2021 05:56:30 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-099-hakwan-lau-and-steve-fleming-neuro-ai-consciousness</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-099-hakwan-lau-and-steve-fleming-neuro-ai-consciousness</link>
                                <description>
                                            <![CDATA[
<div class="wp-block-image"><img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/02/art-lau-fleming-01.jpg" alt="" class="wp-image-1176" /></div>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Hakwan, Steve, and I discuss many issues around the scientific study of consciousness. Steve and Hakwan focus on higher order theories (HOTs) of consciousness, related to metacognition. So we discuss HOTs in particular and their relation to other approaches/theories, the idea of approaching consciousness as a computational problem to be tackled with computational modeling, we talk about the cultural, social, and career aspects of choosing to study something as elusive and controversial as consciousness, we talk about two of the models they’re working on now to account for various properties of conscious experience, and, of course, the prospects of consciousness in AI. For more on metacognition and awareness, check out <a href="https://braininspired.co/podcast/73/">episode 73 with Megan Peters</a>.</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="256" height="320" src="https://braininspired.co/wp-content/uploads/2021/02/headnewsteve_fleming.jpg" alt="" class="wp-image-1177" /></li><li class="blocks-gallery-item"><img width="256" height="320" src="https://braininspired.co/wp-content/uploads/2021/02/maxresdefault.jpg" alt="" class="wp-image-1178" /></li></ul>



<ul><li>Hakwan’s lab: <a href="https://sites.google.com/view/hakwan-lau-lab" target="_blank" rel="noreferrer noopener">Consciousness and Metacognition Lab</a>.</li><li>Steve’s lab: <a href="https://metacoglab.org/" target="_blank" rel="noreferrer noopener">The MetaLab</a>.</li><li>Twitter: <a href="https://twitter.com/hakwanlau" target="_blank" rel="noreferrer noopener">@hakwanlau</a>; <a href="https://twitter.com/smfleming" target="_blank" rel="noreferrer noopener">@smfleming</a>.</li><li>Hakwan’s brief Aeon article: <a href="https://aeon.co/ideas/is-consciousness-a-battle-between-your-beliefs-and-perceptions" target="_blank" rel="noreferrer noopener">Is consciousness a battle between your beliefs and perceptions?</a></li><li>Related papers<ul><li><a href="https://www.frontiersin.org/articles/10.3389/fpsyg.2018.02134/full">An Informal Internet Survey on the Current State of Consciousness Science</a>.</li><li><a href="http://rahnevlab.gatech.edu/documents/papers/Michel_etal(2019)NHB.pdf">Opportunities and challenges for a maturing science of consciousness.</a></li><li><a href="https://science.sciencemag.org/content/358/6362/486.full">What is consciousness, and could machines have it?”</a></li><li><a href="https://philpapers.org/archive/BROTMH.pdf">Understanding the higher-order approach to consciousness.</a></li><li><a href="http://metacoglab.org/s/Fleming2020NOC.pdf">Awareness as inference in a higher-order state space</a>. (Steve’s bayesian predictive generative model)</li><li><a href="https://psyarxiv.com/ckbyf/">Consciousness, Metacognition, &amp; Perceptual Reality Monitoring</a>. (Hakwan’s reality-monitoring model a la generative adversarial networks)</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />7:25 – Steve’s upcoming book<br />8:40 – Challenges to study consciousness<br />15:50 – Gurus and backscratchers<br />23:58 – Will the problem of consciousness disappear?<br />27:52 – Will an explanation feel intuitive?<br />29:54 – What do you want to be true?<br />38:35 – Lucid dreaming<br />40:55 – Higher order theories<br />50:13 – Reality monitoring model of consciousness<br />1:00:15 – Higher order state space model of consciousness<br />1:05:50 – Comparing their models<br />1:10:47 – Machine consciousness<br />1:15:30 – Nature of first order representations<br />1:18:20 – Consciousness prior (Yoshua Bengio)<br />1:20:20 – Function of consciousness<br />1:3...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Hakwan, Steve, and I discuss many issues around the scientific study of consciousness. Steve and Hakwan focus on higher order theories (HOTs) of consciousness, related to metacognition. So we discuss HOTs in particular and their relation to other approaches/theories, the idea of approaching consciousness as a computational problem to be tackled with computational modeling, we talk about the cultural, social, and career aspects of choosing to study something as elusive and controversial as consciousness, we talk about two of the models they’re working on now to account for various properties of conscious experience, and, of course, the prospects of consciousness in AI. For more on metacognition and awareness, check out episode 73 with Megan Peters.







Hakwan’s lab: Consciousness and Metacognition Lab.Steve’s lab: The MetaLab.Twitter: @hakwanlau; @smfleming.Hakwan’s brief Aeon article: Is consciousness a battle between your beliefs and perceptions?Related papersAn Informal Internet Survey on the Current State of Consciousness Science.Opportunities and challenges for a maturing science of consciousness.What is consciousness, and could machines have it?”Understanding the higher-order approach to consciousness.Awareness as inference in a higher-order state space. (Steve’s bayesian predictive generative model)Consciousness, Metacognition, & Perceptual Reality Monitoring. (Hakwan’s reality-monitoring model a la generative adversarial networks)



Timestamps0:00 – Intro7:25 – Steve’s upcoming book8:40 – Challenges to study consciousness15:50 – Gurus and backscratchers23:58 – Will the problem of consciousness disappear?27:52 – Will an explanation feel intuitive?29:54 – What do you want to be true?38:35 – Lucid dreaming40:55 – Higher order theories50:13 – Reality monitoring model of consciousness1:00:15 – Higher order state space model of consciousness1:05:50 – Comparing their models1:10:47 – Machine consciousness1:15:30 – Nature of first order representations1:18:20 – Consciousness prior (Yoshua Bengio)1:20:20 – Function of consciousness1:3...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 099 Hakwan Lau and Steve Fleming: Neuro-AI Consciousness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<div class="wp-block-image"><img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/02/art-lau-fleming-01.jpg" alt="" class="wp-image-1176" /></div>



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Hakwan, Steve, and I discuss many issues around the scientific study of consciousness. Steve and Hakwan focus on higher order theories (HOTs) of consciousness, related to metacognition. So we discuss HOTs in particular and their relation to other approaches/theories, the idea of approaching consciousness as a computational problem to be tackled with computational modeling, we talk about the cultural, social, and career aspects of choosing to study something as elusive and controversial as consciousness, we talk about two of the models they’re working on now to account for various properties of conscious experience, and, of course, the prospects of consciousness in AI. For more on metacognition and awareness, check out <a href="https://braininspired.co/podcast/73/">episode 73 with Megan Peters</a>.</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="256" height="320" src="https://braininspired.co/wp-content/uploads/2021/02/headnewsteve_fleming.jpg" alt="" class="wp-image-1177" /></li><li class="blocks-gallery-item"><img width="256" height="320" src="https://braininspired.co/wp-content/uploads/2021/02/maxresdefault.jpg" alt="" class="wp-image-1178" /></li></ul>



<ul><li>Hakwan’s lab: <a href="https://sites.google.com/view/hakwan-lau-lab" target="_blank" rel="noreferrer noopener">Consciousness and Metacognition Lab</a>.</li><li>Steve’s lab: <a href="https://metacoglab.org/" target="_blank" rel="noreferrer noopener">The MetaLab</a>.</li><li>Twitter: <a href="https://twitter.com/hakwanlau" target="_blank" rel="noreferrer noopener">@hakwanlau</a>; <a href="https://twitter.com/smfleming" target="_blank" rel="noreferrer noopener">@smfleming</a>.</li><li>Hakwan’s brief Aeon article: <a href="https://aeon.co/ideas/is-consciousness-a-battle-between-your-beliefs-and-perceptions" target="_blank" rel="noreferrer noopener">Is consciousness a battle between your beliefs and perceptions?</a></li><li>Related papers<ul><li><a href="https://www.frontiersin.org/articles/10.3389/fpsyg.2018.02134/full">An Informal Internet Survey on the Current State of Consciousness Science</a>.</li><li><a href="http://rahnevlab.gatech.edu/documents/papers/Michel_etal(2019)NHB.pdf">Opportunities and challenges for a maturing science of consciousness.</a></li><li><a href="https://science.sciencemag.org/content/358/6362/486.full">What is consciousness, and could machines have it?”</a></li><li><a href="https://philpapers.org/archive/BROTMH.pdf">Understanding the higher-order approach to consciousness.</a></li><li><a href="http://metacoglab.org/s/Fleming2020NOC.pdf">Awareness as inference in a higher-order state space</a>. (Steve’s bayesian predictive generative model)</li><li><a href="https://psyarxiv.com/ckbyf/">Consciousness, Metacognition, &amp; Perceptual Reality Monitoring</a>. (Hakwan’s reality-monitoring model a la generative adversarial networks)</li></ul></li></ul>



<p>Timestamps<br />0:00 – Intro<br />7:25 – Steve’s upcoming book<br />8:40 – Challenges to study consciousness<br />15:50 – Gurus and backscratchers<br />23:58 – Will the problem of consciousness disappear?<br />27:52 – Will an explanation feel intuitive?<br />29:54 – What do you want to be true?<br />38:35 – Lucid dreaming<br />40:55 – Higher order theories<br />50:13 – Reality monitoring model of consciousness<br />1:00:15 – Higher order state space model of consciousness<br />1:05:50 – Comparing their models<br />1:10:47 – Machine consciousness<br />1:15:30 – Nature of first order representations<br />1:18:20 – Consciousness prior (Yoshua Bengio)<br />1:20:20 – Function of consciousness<br />1:31:57 – Legacy<br />1:40:55 – Current projects</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/099-Hakwan-Lau-and-Steve-Fleming-public.mp3" length="102621193"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Hakwan, Steve, and I discuss many issues around the scientific study of consciousness. Steve and Hakwan focus on higher order theories (HOTs) of consciousness, related to metacognition. So we discuss HOTs in particular and their relation to other approaches/theories, the idea of approaching consciousness as a computational problem to be tackled with computational modeling, we talk about the cultural, social, and career aspects of choosing to study something as elusive and controversial as consciousness, we talk about two of the models they’re working on now to account for various properties of conscious experience, and, of course, the prospects of consciousness in AI. For more on metacognition and awareness, check out episode 73 with Megan Peters.







Hakwan’s lab: Consciousness and Metacognition Lab.Steve’s lab: The MetaLab.Twitter: @hakwanlau; @smfleming.Hakwan’s brief Aeon article: Is consciousness a battle between your beliefs and perceptions?Related papersAn Informal Internet Survey on the Current State of Consciousness Science.Opportunities and challenges for a maturing science of consciousness.What is consciousness, and could machines have it?”Understanding the higher-order approach to consciousness.Awareness as inference in a higher-order state space. (Steve’s bayesian predictive generative model)Consciousness, Metacognition, & Perceptual Reality Monitoring. (Hakwan’s reality-monitoring model a la generative adversarial networks)



Timestamps0:00 – Intro7:25 – Steve’s upcoming book8:40 – Challenges to study consciousness15:50 – Gurus and backscratchers23:58 – Will the problem of consciousness disappear?27:52 – Will an explanation feel intuitive?29:54 – What do you want to be true?38:35 – Lucid dreaming40:55 – Higher order theories50:13 – Reality monitoring model of consciousness1:00:15 – Higher order state space model of consciousness1:05:50 – Comparing their models1:10:47 – Machine consciousness1:15:30 – Nature of first order representations1:18:20 – Consciousness prior (Yoshua Bengio)1:20:20 – Function of consciousness1:3...]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-lau-fleming-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:46:35</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 098 Brian Christian: The Alignment Problem]]>
                </title>
                <pubDate>Thu, 18 Feb 2021 16:50:58 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-098-brian-christian-the-alignment-problem</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-098-brian-christian-the-alignment-problem</link>
                                <description>
                                            <![CDATA[<p>Brian and I discuss a range of topics related to his latest book, <a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79">The Alignment Problem: Machine Learning and Human Values.</a> The alignment problem asks how we can build AI that does what we want it to do, as opposed to building AI that will compromise our own values by accomplishing tasks that may be harmful or dangerous to us. Using some of the stories Brian relates in the book, we talk about:</p>



<ul class="wp-block-list"><li>The history of machine learning and how we got to this point;</li><li>Some methods researchers are creating to understand what's being represented in neural nets and how they generate their output;</li><li>Some modern proposed solutions to the alignment problem, like programming the machines to learn our preferences so they can help achieve those preferences - an idea called inverse reinforcement learning;</li><li>The thorny issue of accurately knowing our own values - if we get those wrong, will machines also get it wrong?</li></ul>





<p>Links:</p>



<ul class="wp-block-list"><li><a href="https://brianchristian.org/" target="_blank" rel="noreferrer noopener">Brian's website</a>.</li><li>Twitter: <a href="https://twitter.com/brianchristian" target="_blank" rel="noreferrer noopener">@brianchristian</a>.</li><li><a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79">The Alignment Problem:</a><a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79" target="_blank" rel="noreferrer noopener"> </a><a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79">Machine Learning and Human Values.</a></li><li>Related papers<ul><li>Norbert Wiener from 1960: <a href="https://nissenbaum.tech.cornell.edu/papers/Wiener.pdf" target="_blank" rel="noreferrer noopener">Some Moral and Technical Consequences of Automation</a>.</li></ul></li></ul>



<p>Timestamps:
4:22 - Increased work on AI ethics
8:59 - The Alignment Problem overview
12:36 - Stories as important for intelligence
16:50 - What is the alignment problem
17:37 - Who works on the alignment problem?
25:22 - AI ethics degree?
29:03 - Human values
31:33 - AI alignment and evolution
37:10 - Knowing our own values?
46:27 - What have we learned about ourselves?
58:51 - Interestingness
1:00:53 - Inverse RL for value alignment
1:04:50 - Current progress
1:10:08 - Developmental psychology
1:17:36 - Models as the danger
1:25:08 - How worried are the experts?</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[Brian and I discuss a range of topics related to his latest book, The Alignment Problem: Machine Learning and Human Values. The alignment problem asks how we can build AI that does what we want it to do, as opposed to building AI that will compromise our own values by accomplishing tasks that may be harmful or dangerous to us. Using some of the stories Brian relates in the book, we talk about:



The history of machine learning and how we got to this point;Some methods researchers are creating to understand what's being represented in neural nets and how they generate their output;Some modern proposed solutions to the alignment problem, like programming the machines to learn our preferences so they can help achieve those preferences - an idea called inverse reinforcement learning;The thorny issue of accurately knowing our own values - if we get those wrong, will machines also get it wrong?





Links:



Brian's website.Twitter: @brianchristian.The Alignment Problem: Machine Learning and Human Values.Related papersNorbert Wiener from 1960: Some Moral and Technical Consequences of Automation.



Timestamps:
4:22 - Increased work on AI ethics
8:59 - The Alignment Problem overview
12:36 - Stories as important for intelligence
16:50 - What is the alignment problem
17:37 - Who works on the alignment problem?
25:22 - AI ethics degree?
29:03 - Human values
31:33 - AI alignment and evolution
37:10 - Knowing our own values?
46:27 - What have we learned about ourselves?
58:51 - Interestingness
1:00:53 - Inverse RL for value alignment
1:04:50 - Current progress
1:10:08 - Developmental psychology
1:17:36 - Models as the danger
1:25:08 - How worried are the experts?]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 098 Brian Christian: The Alignment Problem]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[<p>Brian and I discuss a range of topics related to his latest book, <a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79">The Alignment Problem: Machine Learning and Human Values.</a> The alignment problem asks how we can build AI that does what we want it to do, as opposed to building AI that will compromise our own values by accomplishing tasks that may be harmful or dangerous to us. Using some of the stories Brian relates in the book, we talk about:</p>



<ul class="wp-block-list"><li>The history of machine learning and how we got to this point;</li><li>Some methods researchers are creating to understand what's being represented in neural nets and how they generate their output;</li><li>Some modern proposed solutions to the alignment problem, like programming the machines to learn our preferences so they can help achieve those preferences - an idea called inverse reinforcement learning;</li><li>The thorny issue of accurately knowing our own values - if we get those wrong, will machines also get it wrong?</li></ul>





<p>Links:</p>



<ul class="wp-block-list"><li><a href="https://brianchristian.org/" target="_blank" rel="noreferrer noopener">Brian's website</a>.</li><li>Twitter: <a href="https://twitter.com/brianchristian" target="_blank" rel="noreferrer noopener">@brianchristian</a>.</li><li><a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79">The Alignment Problem:</a><a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79" target="_blank" rel="noreferrer noopener"> </a><a href="https://www.amazon.com/gp/product/0393635821/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0393635821&amp;linkId=91a6e45dfd382a081314478830903d79">Machine Learning and Human Values.</a></li><li>Related papers<ul><li>Norbert Wiener from 1960: <a href="https://nissenbaum.tech.cornell.edu/papers/Wiener.pdf" target="_blank" rel="noreferrer noopener">Some Moral and Technical Consequences of Automation</a>.</li></ul></li></ul>



<p>Timestamps:
4:22 - Increased work on AI ethics
8:59 - The Alignment Problem overview
12:36 - Stories as important for intelligence
16:50 - What is the alignment problem
17:37 - Who works on the alignment problem?
25:22 - AI ethics degree?
29:03 - Human values
31:33 - AI alignment and evolution
37:10 - Knowing our own values?
46:27 - What have we learned about ourselves?
58:51 - Interestingness
1:00:53 - Inverse RL for value alignment
1:04:50 - Current progress
1:10:08 - Developmental psychology
1:17:36 - Models as the danger
1:25:08 - How worried are the experts?</p>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/098-Brian-Christian-public.mp3" length="89228246"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[Brian and I discuss a range of topics related to his latest book, The Alignment Problem: Machine Learning and Human Values. The alignment problem asks how we can build AI that does what we want it to do, as opposed to building AI that will compromise our own values by accomplishing tasks that may be harmful or dangerous to us. Using some of the stories Brian relates in the book, we talk about:



The history of machine learning and how we got to this point;Some methods researchers are creating to understand what's being represented in neural nets and how they generate their output;Some modern proposed solutions to the alignment problem, like programming the machines to learn our preferences so they can help achieve those preferences - an idea called inverse reinforcement learning;The thorny issue of accurately knowing our own values - if we get those wrong, will machines also get it wrong?





Links:



Brian's website.Twitter: @brianchristian.The Alignment Problem: Machine Learning and Human Values.Related papersNorbert Wiener from 1960: Some Moral and Technical Consequences of Automation.



Timestamps:
4:22 - Increased work on AI ethics
8:59 - The Alignment Problem overview
12:36 - Stories as important for intelligence
16:50 - What is the alignment problem
17:37 - Who works on the alignment problem?
25:22 - AI ethics degree?
29:03 - Human values
31:33 - AI alignment and evolution
37:10 - Knowing our own values?
46:27 - What have we learned about ourselves?
58:51 - Interestingness
1:00:53 - Inverse RL for value alignment
1:04:50 - Current progress
1:10:08 - Developmental psychology
1:17:36 - Models as the danger
1:25:08 - How worried are the experts?]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-christian-01-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:32:38</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 097 Omri Barak and David Sussillo: Dynamics and Structure]]>
                </title>
                <pubDate>Mon, 08 Feb 2021 10:56:29 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-097-omri-barak-and-david-sussillo-dynamics-and-structure</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-097-omri-barak-and-david-sussillo-dynamics-and-structure</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/02/art-barak-sussillo-01.jpg" alt="" class="wp-image-1166" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Omri, David and I discuss using recurrent neural network models (RNNs) to understand brains and brain function. Omri and David both use dynamical systems theory (DST) to describe how RNNs solve tasks, and to compare the dynamical structure/landscape/skeleton of RNNs with real neural population recordings. We talk about how their thoughts have evolved since their 2013 <a href="https://barak.net.technion.ac.il/files/2012/11/sussillo_barak-neco.pdf">Opening the Black Box</a> paper, which began these lines of research and thinking. Some of the other topics we discuss:</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="512" height="512" src="https://braininspired.co/wp-content/uploads/2021/02/davidhead.jpeg" alt="" class="wp-image-1167" /></li><li class="blocks-gallery-item"><img width="284" height="390" src="https://braininspired.co/wp-content/uploads/2021/02/OmriBarak.jpg" alt="" class="wp-image-1168" /></li></ul>



<ul><li>The idea of computation via dynamics, which sees computation as a process of evolving neural activity in a state space;</li><li>Whether DST offers a description of mental function (that is, something beyond brain function, closer to the psychological level);</li><li>The difference between classical approaches to modeling brains and the machine learning approach;</li><li>The concept of universality – that the variety of artificial RNNs and natural RNNs (brains) adhere to some similar dynamical structure despite differences in the computations they perform;</li><li>How learning is influenced by the dynamics in an ongoing and ever-changing manner, and how learning (a process) is distinct from optimization (a final trained state).</li><li><a href="https://braininspired.co/podcast/5/" target="_blank" rel="noreferrer noopener">David was on episode 5</a>, for a more introductory episode on dynamics, RNNs, and brains.</li></ul>



<ul><li><a href="https://barak.net.technion.ac.il/" target="_blank" rel="noreferrer noopener">Barak Lab</a></li><li>Twitter: <a href="https://twitter.com/sussillodavid" target="_blank" rel="noreferrer noopener">@SussilloDavid</a></li><li>The papers we discuss or mention:<ul><li>Sussillo, D. &amp; Barak, O. (2013). <a href="https://barak.net.technion.ac.il/files/2012/11/sussillo_barak-neco.pdf" target="_blank" rel="noreferrer noopener">Opening the Black Box: Low-dimensional dynamics in high-dimensional recurrent neural networks.</a></li><li><a href="https://www.annualreviews.org/doi/abs/10.1146/annurev-neuro-092619-094115" target="_blank" rel="noreferrer noopener">Computation Through Neural Population Dynamics</a>.</li><li><a href="https://openreview.net/pdf?id=Byx4NkrtDS">Implementing Inductive bias for different navigation tasks through diverse RNN attrractors.</a></li><li><a href="https://arxiv.org/abs/1909.04358">Dynamics of random recurrent networks with correlated low-rank structure</a>.</li><li><a href="https://arxiv.org/pdf/2011.06066.pdf">Quality of internal representation shapes learning performance in feedback neural networks</a>.</li><li>Feigenbaum’s universality constant original paper: <a href="http://chaosbook.org/extras/mjf/LA-6816-PR.pdf">Feigenbaum, M. J. (1976) “Universality in complex discrete dynamics”, Los Alamos Theoretical Division Annual Report 1975-1976</a></li></ul></li><li>Talks<ul><li><a href="https://vimeo.com/463740473/dfd5f71f61">Universality and individuality in neural dynamics across large populations of recurrent networks</a>.</li><li><a href="https://www.youtube.com/watch?v=HJdH-iXJIqQ&amp;feature=youtu.be">World Wide Theoretical Neuroscience Seminar: Omri Barak, January 6, 2021</a></li></ul>...</li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Omri, David and I discuss using recurrent neural network models (RNNs) to understand brains and brain function. Omri and David both use dynamical systems theory (DST) to describe how RNNs solve tasks, and to compare the dynamical structure/landscape/skeleton of RNNs with real neural population recordings. We talk about how their thoughts have evolved since their 2013 Opening the Black Box paper, which began these lines of research and thinking. Some of the other topics we discuss:







The idea of computation via dynamics, which sees computation as a process of evolving neural activity in a state space;Whether DST offers a description of mental function (that is, something beyond brain function, closer to the psychological level);The difference between classical approaches to modeling brains and the machine learning approach;The concept of universality – that the variety of artificial RNNs and natural RNNs (brains) adhere to some similar dynamical structure despite differences in the computations they perform;How learning is influenced by the dynamics in an ongoing and ever-changing manner, and how learning (a process) is distinct from optimization (a final trained state).David was on episode 5, for a more introductory episode on dynamics, RNNs, and brains.



Barak LabTwitter: @SussilloDavidThe papers we discuss or mention:Sussillo, D. & Barak, O. (2013). Opening the Black Box: Low-dimensional dynamics in high-dimensional recurrent neural networks.Computation Through Neural Population Dynamics.Implementing Inductive bias for different navigation tasks through diverse RNN attractors.Dynamics of random recurrent networks with correlated low-rank structure.Quality of internal representation shapes learning performance in feedback neural networks.Feigenbaum’s universality constant original paper: Feigenbaum, M. J. (1976) “Universality in complex discrete dynamics”, Los Alamos Theoretical Division Annual Report 1975-1976TalksUniversality and individuality in neural dynamics across large populations of recurrent networks.World Wide Theoretical Neuroscience Seminar: Omri Barak, January 6, 2021...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 097 Omri Barak and David Sussillo: Dynamics and Structure]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/02/art-barak-sussillo-01.jpg" alt="" class="wp-image-1166" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Omri, David and I discuss using recurrent neural network models (RNNs) to understand brains and brain function. Omri and David both use dynamical systems theory (DST) to describe how RNNs solve tasks, and to compare the dynamical structure/landscape/skeleton of RNNs with real neural population recordings. We talk about how their thoughts have evolved since their 2013 <a href="https://barak.net.technion.ac.il/files/2012/11/sussillo_barak-neco.pdf">Opening the Black Box</a> paper, which began these lines of research and thinking. Some of the other topics we discuss:</p>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="512" height="512" src="https://braininspired.co/wp-content/uploads/2021/02/davidhead.jpeg" alt="" class="wp-image-1167" /></li><li class="blocks-gallery-item"><img width="284" height="390" src="https://braininspired.co/wp-content/uploads/2021/02/OmriBarak.jpg" alt="" class="wp-image-1168" /></li></ul>



<ul><li>The idea of computation via dynamics, which sees computation as a process of evolving neural activity in a state space;</li><li>Whether DST offers a description of mental function (that is, something beyond brain function, closer to the psychological level);</li><li>The difference between classical approaches to modeling brains and the machine learning approach;</li><li>The concept of universality – that the variety of artificial RNNs and natural RNNs (brains) adhere to some similar dynamical structure despite differences in the computations they perform;</li><li>How learning is influenced by the dynamics in an ongoing and ever-changing manner, and how learning (a process) is distinct from optimization (a final trained state).</li><li><a href="https://braininspired.co/podcast/5/" target="_blank" rel="noreferrer noopener">David was on episode 5</a>, for a more introductory episode on dynamics, RNNs, and brains.</li></ul>



<ul><li><a href="https://barak.net.technion.ac.il/" target="_blank" rel="noreferrer noopener">Barak Lab</a></li><li>Twitter: <a href="https://twitter.com/sussillodavid" target="_blank" rel="noreferrer noopener">@SussilloDavid</a></li><li>The papers we discuss or mention:<ul><li>Sussillo, D. &amp; Barak, O. (2013). <a href="https://barak.net.technion.ac.il/files/2012/11/sussillo_barak-neco.pdf" target="_blank" rel="noreferrer noopener">Opening the Black Box: Low-dimensional dynamics in high-dimensional recurrent neural networks.</a></li><li><a href="https://www.annualreviews.org/doi/abs/10.1146/annurev-neuro-092619-094115" target="_blank" rel="noreferrer noopener">Computation Through Neural Population Dynamics</a>.</li><li><a href="https://openreview.net/pdf?id=Byx4NkrtDS">Implementing Inductive bias for different navigation tasks through diverse RNN attrractors.</a></li><li><a href="https://arxiv.org/abs/1909.04358">Dynamics of random recurrent networks with correlated low-rank structure</a>.</li><li><a href="https://arxiv.org/pdf/2011.06066.pdf">Quality of internal representation shapes learning performance in feedback neural networks</a>.</li><li>Feigenbaum’s universality constant original paper: <a href="http://chaosbook.org/extras/mjf/LA-6816-PR.pdf">Feigenbaum, M. J. (1976) “Universality in complex discrete dynamics”, Los Alamos Theoretical Division Annual Report 1975-1976</a></li></ul></li><li>Talks<ul><li><a href="https://vimeo.com/463740473/dfd5f71f61">Universality and individuality in neural dynamics across large populations of recurrent networks</a>.</li><li><a href="https://www.youtube.com/watch?v=HJdH-iXJIqQ&amp;feature=youtu.be">World Wide Theoretical Neuroscience Seminar: Omri Barak, January 6, 2021</a></li></ul></li></ul>



<p>Timestamps:<br />0:00 – Intro <br />5:41 – Best scientific moment <br />9:37 – Why do you do what you do? <br />13:21 – Computation via dynamics <br />19:12 – Evolution of thinking about RNNs and brains <br />26:22 – RNNs vs. minds <br />31:43 – Classical computational modeling vs. machine learning modeling approach <br />35:46 – What are models good for? <br />43:08 – Ecological task validity with respect to using RNNs as models <br />46:27 – Optimization vs. learning <br />49:11 – Universality <br />1:00:47 – Solutions dictated by tasks <br />1:04:51 – Multiple solutions to the same task <br />1:11:43 – Direct fit (Uri Hasson) <br />1:19:09 – Thinking about the bigger picture <br /></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/097-Omri-Barak-David-Sussillo-public.mp3" length="80899186"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Omri, David and I discuss using recurrent neural network models (RNNs) to understand brains and brain function. Omri and David both use dynamical systems theory (DST) to describe how RNNs solve tasks, and to compare the dynamical structure/landscape/skeleton of RNNs with real neural population recordings. We talk about how their thoughts have evolved since their 2013 Opening the Black Box paper, which began these lines of research and thinking. Some of the other topics we discuss:







The idea of computation via dynamics, which sees computation as a process of evolving neural activity in a state space;Whether DST offers a description of mental function (that is, something beyond brain function, closer to the psychological level);The difference between classical approaches to modeling brains and the machine learning approach;The concept of universality – that the variety of artificial RNNs and natural RNNs (brains) adhere to some similar dynamical structure despite differences in the computations they perform;How learning is influenced by the dynamics in an ongoing and ever-changing manner, and how learning (a process) is distinct from optimization (a final trained state).David was on episode 5, for a more introductory episode on dynamics, RNNs, and brains.



Barak LabTwitter: @SussilloDavidThe papers we discuss or mention:Sussillo, D. & Barak, O. (2013). Opening the Black Box: Low-dimensional dynamics in high-dimensional recurrent neural networks.Computation Through Neural Population Dynamics.Implementing Inductive bias for different navigation tasks through diverse RNN attrractors.Dynamics of random recurrent networks with correlated low-rank structure.Quality of internal representation shapes learning performance in feedback neural networks.Feigenbaum’s universality constant original paper: Feigenbaum, M. J. (1976) “Universality in complex discrete dynamics”, Los Alamos Theoretical Division Annual Report 1975-1976TalksUniversality and individuality in neural dynamics across large populations of recurrent networks.World Wide Theoretical Neuroscience Seminar: Omri Barak, January 6, 2021...]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-barak-sussillo-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:23:57</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 096 Keisuke Fukuda and Josh Cosman: Forking Paths]]>
                </title>
                <pubDate>Fri, 29 Jan 2021 13:04:40 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-096-keisuke-fukuda-and-josh-cosman-forking-paths</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-096-keisuke-fukuda-and-josh-cosman-forking-paths</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/01/art-fukuda-cosman-01.jpg" alt="" class="wp-image-1162" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /> </div>



<div class="wp-block-image"><img width="400" height="212" src="https://braininspired.co/wp-content/uploads/2021/01/IMG_2663_small.jpg" alt="" class="wp-image-1163" /></div>



<p>K, Josh, and I were postdocs together in <a href="http://www.psy.vanderbilt.edu/faculty/schall/">Jeff Schall’s</a> and <a href="http://www.psy.vanderbilt.edu/faculty/woodman/">Geoff Woodman’s</a> labs. K and Josh had backgrounds in psychology and were getting their first experience with neurophysiology, recording single neuron activity in awake behaving primates. This episode is a discussion surrounding their reflections and perspectives on neuroscience and psychology, given their backgrounds and experience (we reference <a href="https://braininspired.co/podcast/84/">episode 84 with György Buzsáki and David Poeppel</a>). We also talk about their divergent paths – K stayed in academia and runs an EEG lab studying human decision-making and memory, and Josh left academia and has worked for three different pharmaceutical and tech companies. So this episode doesn’t get into gritty science questions, but is a light discussion about the state of neuroscience, psychology, and AI, and reflections on academia and industry, life in lab, and plenty more.</p>



<ul><li><a href="https://fukudalab.org/">The Fukuda Lab</a>.</li><li><a href="http://www.joshcosman.com/">Josh’s website</a>.</li><li>Twitter: <a href="https://twitter.com/KeisukeFukuda4">@KeisukeFukuda4</a></li></ul>



<p>Time stamps<br />0:00 – Intro<br />4:30 – K intro<br />5:30 – Josh Intro<br />10:16 – Academia vs. industry<br />16:01 – Concern with legacy<br />19:57 – Best scientific moment<br />24:15 – Experiencing neuroscience as a psychologist<br />27:20 – Neuroscience as a tool<br />30:38 – Brain/mind divide<br />33:27 – Shallow vs. deep knowledge in academia and industry <br />36:05 – Autonomy in industry<br />42:20 – Is this a turning point in neuroscience?<br />46:54 – Deep learning revolution<br />49:34 – Deep nets to understand brains<br />54:54 – Psychology vs. neuroscience<br />1:06:42 – Is language sufficient?<br />1:11:33 – Human-level AI<br />1:13:53 – How will history view our era of neuroscience?<br />1:23:28 – What would you have done differently?<br />1:26:46 – Something you wish you knew</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[




 







K, Josh, and I were postdocs together in Jeff Schall’s and Geoff Woodman’s labs. K and Josh had backgrounds in psychology and were getting their first experience with neurophysiology, recording single neuron activity in awake behaving primates. This episode is a discussion surrounding their reflections and perspectives on neuroscience and psychology, given their backgrounds and experience (we reference episode 84 with György Buzsáki and David Poeppel). We also talk about their divergent paths – K stayed in academia and runs an EEG lab studying human decision-making and memory, and Josh left academia and has worked for three different pharmaceutical and tech companies. So this episode doesn’t get into gritty science questions, but is a light discussion about the state of neuroscience, psychology, and AI, and reflections on academia and industry, life in lab, and plenty more.



The Fukuda Lab.Josh’s website.Twitter: @KeisukeFukuda4



Time stamps0:00 – Intro4:30 – K intro5:30 – Josh Intro10:16 – Academia vs. industry16:01 – Concern with legacy19:57 – Best scientific moment24:15 – Experiencing neuroscience as a psychologist27:20 – Neuroscience as a tool30:38 – Brain/mind divide33:27 – Shallow vs. deep knowledge in academia and industry 36:05 – Autonomy in industry42:20 – Is this a turning point in neuroscience?46:54 – Deep learning revolution49:34 – Deep nets to understand brains54:54 – Psychology vs. neuroscience1:06:42 – Is language sufficient?1:11:33 – Human-level AI1:13:53 – How will history view our era of neuroscience?1:23:28 – What would you have done differently?1:26:46 – Something you wish you knew
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 096 Keisuke Fukuda and Josh Cosman: Forking Paths]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/01/art-fukuda-cosman-01.jpg" alt="" class="wp-image-1162" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /> </div>



<div class="wp-block-image"><img width="400" height="212" src="https://braininspired.co/wp-content/uploads/2021/01/IMG_2663_small.jpg" alt="" class="wp-image-1163" /></div>



<p>K, Josh, and I were postdocs together in <a href="http://www.psy.vanderbilt.edu/faculty/schall/">Jeff Schall’s</a> and <a href="http://www.psy.vanderbilt.edu/faculty/woodman/">Geoff Woodman’s</a> labs. K and Josh had backgrounds in psychology and were getting their first experience with neurophysiology, recording single neuron activity in awake behaving primates. This episode is a discussion surrounding their reflections and perspectives on neuroscience and psychology, given their backgrounds and experience (we reference <a href="https://braininspired.co/podcast/84/">episode 84 with György Buzsáki and David Poeppel</a>). We also talk about their divergent paths – K stayed in academia and runs an EEG lab studying human decision-making and memory, and Josh left academia and has worked for three different pharmaceutical and tech companies. So this episode doesn’t get into gritty science questions, but is a light discussion about the state of neuroscience, psychology, and AI, and reflections on academia and industry, life in lab, and plenty more.</p>



<ul><li><a href="https://fukudalab.org/">The Fukuda Lab</a>.</li><li><a href="http://www.joshcosman.com/">Josh’s website</a>.</li><li>Twitter: <a href="https://twitter.com/KeisukeFukuda4">@KeisukeFukuda4</a></li></ul>



<p>Time stamps<br />0:00 – Intro<br />4:30 – K intro<br />5:30 – Josh Intro<br />10:16 – Academia vs. industry<br />16:01 – Concern with legacy<br />19:57 – Best scientific moment<br />24:15 – Experiencing neuroscience as a psychologist<br />27:20 – Neuroscience as a tool<br />30:38 – Brain/mind divide<br />33:27 – Shallow vs. deep knowledge in academia and industry <br />36:05 – Autonomy in industry<br />42:20 – Is this a turning point in neuroscience?<br />46:54 – Deep learning revolution<br />49:34 – Deep nets to understand brains<br />54:54 – Psychology vs. neuroscience<br />1:06:42 – Is language sufficient?<br />1:11:33 – Human-level AI<br />1:13:53 – How will history view our era of neuroscience?<br />1:23:28 – What would you have done differently?<br />1:26:46 – Something you wish you knew</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/096-Keisuke-Fukuda-and-Josh-Cosman-Public.mp3" length="90708352"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[




 







K, Josh, and I were postdocs together in Jeff Schall’s and Geoff Woodman’s labs. K and Josh had backgrounds in psychology and were getting their first experience with neurophysiology, recording single neuron activity in awake behaving primates. This episode is a discussion surrounding their reflections and perspectives on neuroscience and psychology, given their backgrounds and experience (we reference episode 84 with György Buzsáki and David Poeppel). We also talk about their divergent paths – K stayed in academia and runs an EEG lab studying human decision-making and memory, and Josh left academia and has worked for three different pharmaceutical and tech companies. So this episode doesn’t get into gritty science questions, but is a light discussion about the state of neuroscience, psychology, and AI, and reflections on academia and industry, life in lab, and plenty more.



The Fukuda Lab.Josh’s website.Twitter: @KeisukeFukuda4



Time stamps0:00 – Intro4:30 – K intro5:30 – Josh Intro10:16 – Academia vs. industry16:01 – Concern with legacy19:57 – Best scientific moment24:15 – Experiencing neuroscience as a psychologist27:20 – Neuroscience as a tool30:38 – Brain/mind divide33:27 – Shallow vs. deep knowledge in academia and industry 36:05 – Autonomy in industry42:20 – Is this a turning point in neuroscience?46:54 – Deep learning revolution49:34 – Deep nets to understand brains54:54 – Psychology vs. neuroscience1:06:42 – Is language sufficient?1:11:33 – Human-level AI1:13:53 – How will history view our era of neuroscience?1:23:28 – What would you have done differently?1:26:46 – Something you wish you knew
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-fukuda-cosman-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:34:10</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 095 Chris Summerfield and Sam Gershman: Neuro for AI?]]>
                </title>
                <pubDate>Mon, 18 Jan 2021 19:34:14 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-095-chris-summerfield-and-sam-gershman-neuro-for-ai</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-095-chris-summerfield-and-sam-gershman-neuro-for-ai</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/01/art-summerfield-gershman-1-01.jpg" alt="" class="wp-image-1155" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="240" height="239" src="https://braininspired.co/wp-content/uploads/2021/01/chrisHead.png" alt="" class="wp-image-1156" /></div>



<div class="wp-block-image"><img width="180" height="220" src="https://braininspired.co/wp-content/uploads/2019/02/sam.jpg" alt="" class="wp-image-653" /></div>



<p>It’s generally agreed machine learning and AI provide neuroscience with tools for analysis and theoretical principles to test in brains, but there is less agreement about what neuroscience can provide AI. Should computer scientists and engineers care about how brains compute, or will it just slow them down, for example? Chris, Sam, and I discuss how neuroscience might contribute to AI moving forward, considering the past and present. This discussion also leads into related topics, like the role of prediction versus understanding, AGI, explainable AI, value alignment, the fundamental conundrum that humans specify the ultimate values of the tasks AI will solve, and more. Plus, a question from previous guest <a href="https://braininspired.co/podcast/52/" target="_blank" rel="noreferrer noopener">Andrew Saxe</a>. Also, check out <a href="https://braininspired.co/podcast/28/" target="_blank" rel="noreferrer noopener">Sam’s previous appearance on the podcast</a>.</p>



<ul><li>Chris’s lab: <a href="http://decisions.psy.ox.ac.uk/#!/home" target="_blank" rel="noreferrer noopener">Human Information Processing lab</a>.</li><li>Sam’s lab: <a href="https://gershmanlab.com/index.html" target="_blank" rel="noreferrer noopener">Computational Cognitive Neuroscience Lab</a>.</li><li>Twitter: <a href="https://twitter.com/gershbrain" target="_blank" rel="noreferrer noopener">@gershbrain</a>; <a href="https://twitter.com/summerfieldlab" target="_blank" rel="noreferrer noopener">@summerfieldlab</a></li><li>Papers we discuss or mention or are related:<ul><li><a href="https://arxiv.org/abs/2004.07580">If deep learning is the answer, then what is the question?</a></li><li><a href="https://www.cell.com/neuron/pdf/S0896-6273(17)30509-3.pdf">Neuroscience-Inspired Artificial Intelligence</a>.</li><li><a href="https://www.youtube.com/watch?v=O0MF-r9PsvE">Building Machines that Learn and Think Like People</a>.</li></ul></li></ul>



<p>0:00 – Intro <br />5:00 – Good ol’ days <br />13:50 – AI for neuro, neuro for AI <br />24:25 – Intellectual diversity in AI <br />28:40 – Role of philosophy <br />30:20 – Operationalization and benchmarks <br />36:07 – Prediction vs. understanding <br />42:48 – Role of humans in the loop <br />46:20 – Value alignment <br />51:08 – Andrew Saxe question <br />53:16 – Explainable AI <br />58:55 – Generalization <br />1:01:09 – What has AI revealed about us? <br />1:09:38 – Neuro for AI <br />1:20:30 – Concluding remarks</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
















It’s generally agreed machine learning and AI provide neuroscience with tools for analysis and theoretical principles to test in brains, but there is less agreement about what neuroscience can provide AI. Should computer scientists and engineers care about how brains compute, or will it just slow them down, for example? Chris, Sam, and I discuss how neuroscience might contribute to AI moving forward, considering the past and present. This discussion also leads into related topics, like the role of prediction versus understanding, AGI, explainable AI, value alignment, the fundamental conundrum that humans specify the ultimate values of the tasks AI will solve, and more. Plus, a question from previous guest Andrew Saxe. Also, check out Sam’s previous appearance on the podcast.



Chris’s lab: Human Information Processing lab.Sam’s lab: Computational Cognitive Neuroscience Lab.Twitter: @gershbrain; @summerfieldlabPapers we discuss or mention or are related:If deep learning is the answer, then what is the question?Neuroscience-Inspired Artificial Intelligence.Building Machines that Learn and Think Like People.



0:00 – Intro 5:00 – Good ol’ days 13:50 – AI for neuro, neuro for AI 24:25 – Intellectual diversity in AI 28:40 – Role of philosophy 30:20 – Operationalization and benchmarks 36:07 – Prediction vs. understanding 42:48 – Role of humans in the loop 46:20 – Value alignment 51:08 – Andrew Saxe question 53:16 – Explainable AI 58:55 – Generalization 1:01:09 – What has AI revealed about us? 1:09:38 – Neuro for AI 1:20:30 – Concluding remarks
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 095 Chris Summerfield and Sam Gershman: Neuro for AI?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/01/art-summerfield-gershman-1-01.jpg" alt="" class="wp-image-1155" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="240" height="239" src="https://braininspired.co/wp-content/uploads/2021/01/chrisHead.png" alt="" class="wp-image-1156" /></div>



<div class="wp-block-image"><img width="180" height="220" src="https://braininspired.co/wp-content/uploads/2019/02/sam.jpg" alt="" class="wp-image-653" /></div>



<p>It’s generally agreed machine learning and AI provide neuroscience with tools for analysis and theoretical principles to test in brains, but there is less agreement about what neuroscience can provide AI. Should computer scientists and engineers care about how brains compute, or will it just slow them down, for example? Chris, Sam, and I discuss how neuroscience might contribute to AI moving forward, considering the past and present. This discussion also leads into related topics, like the role of prediction versus understanding, AGI, explainable AI, value alignment, the fundamental conundrum that humans specify the ultimate values of the tasks AI will solve, and more. Plus, a question from previous guest <a href="https://braininspired.co/podcast/52/" target="_blank" rel="noreferrer noopener">Andrew Saxe</a>. Also, check out <a href="https://braininspired.co/podcast/28/" target="_blank" rel="noreferrer noopener">Sam’s previous appearance on the podcast</a>.</p>



<ul><li>Chris’s lab: <a href="http://decisions.psy.ox.ac.uk/#!/home" target="_blank" rel="noreferrer noopener">Human Information Processing lab</a>.</li><li>Sam’s lab: <a href="https://gershmanlab.com/index.html" target="_blank" rel="noreferrer noopener">Computational Cognitive Neuroscience Lab</a>.</li><li>Twitter: <a href="https://twitter.com/gershbrain" target="_blank" rel="noreferrer noopener">@gershbrain</a>; <a href="https://twitter.com/summerfieldlab" target="_blank" rel="noreferrer noopener">@summerfieldlab</a></li><li>Papers we discuss or mention or are related:<ul><li><a href="https://arxiv.org/abs/2004.07580">If deep learning is the answer, then what is the question?</a></li><li><a href="https://www.cell.com/neuron/pdf/S0896-6273(17)30509-3.pdf">Neuroscience-Inspired Artificial Intelligence</a>.</li><li><a href="https://www.youtube.com/watch?v=O0MF-r9PsvE">Building Machines that Learn and Think Like People</a>.</li></ul></li></ul>



<p>0:00 – Intro <br />5:00 – Good ol’ days <br />13:50 – AI for neuro, neuro for AI <br />24:25 – Intellectual diversity in AI <br />28:40 – Role of philosophy <br />30:20 – Operationalization and benchmarks <br />36:07 – Prediction vs. understanding <br />42:48 – Role of humans in the loop <br />46:20 – Value alignment <br />51:08 – Andrew Saxe question <br />53:16 – Explainable AI <br />58:55 – Generalization <br />1:01:09 – What has AI revealed about us? <br />1:09:38 – Neuro for AI <br />1:20:30 – Concluding remarks</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/095-Chris-and-Sam.mp3" length="82352236"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
















It’s generally agreed machine learning and AI provide neuroscience with tools for analysis and theoretical principles to test in brains, but there is less agreement about what neuroscience can provide AI. Should computer scientists and engineers care about how brains compute, or will it just slow them down, for example? Chris, Sam, and I discuss how neuroscience might contribute to AI moving forward, considering the past and present. This discussion also leads into related topics, like the role of prediction versus understanding, AGI, explainable AI, value alignment, the fundamental conundrum that humans specify the ultimate values of the tasks AI will solve, and more. Plus, a question from previous guest Andrew Saxe. Also, check out Sam’s previous appearance on the podcast.



Chris’s lab: Human Information Processing lab.Sam’s lab: Computational Cognitive Neuroscience Lab.Twitter: @gershbrain; @summerfieldlabPapers we discuss or mention or are related:If deep learning is the answer, then what is the question?Neuroscience-Inspired Artificial Intelligence.Building Machines that Learn and Think Like People.



0:00 – Intro 5:00 – Good ol’ days 13:50 – AI for neuro, neuro for AI 24:25 – Intellectual diversity in AI 28:40 – Role of philosophy 30:20 – Operationalization and benchmarks 36:07 – Prediction vs. understanding 42:48 – Role of humans in the loop 46:20 – Value alignment 51:08 – Andrew Saxe question 53:16 – Explainable AI 58:55 – Generalization 1:01:09 – What has AI revealed about us? 1:09:38 – Neuro for AI 1:20:30 – Concluding remarks
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-summerfield-gershman-1-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:25:28</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 094 Alison Gopnik: Child-Inspired AI]]>
                </title>
                <pubDate>Fri, 08 Jan 2021 16:38:30 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-094-alison-gopnik-child-inspired-ai</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-094-alison-gopnik-child-inspired-ai</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/01/art-gopnik-01.jpg" alt="" class="wp-image-1151" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="188" height="196" src="https://braininspired.co/wp-content/uploads/2021/01/1-AlisonGopnik.jpg" alt="" class="wp-image-1152" /></div>



<p>Alison and I discuss her work to accelerate learning and thus improve AI by studying how children learn, as Alan Turing suggested in his <a href="https://www.cs.toronto.edu/~frank/csc2501/Readings/R1_Turing/Turing-1950.pdf">famous 1950 paper.</a> The ways children learn are via imitation, by learning abstract causal models, and active learning by implementing a high exploration/exploitation ratio. We also discuss child consciousness, psychedelics, the concept of life history, the role of grandparents and elders, and lots more.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1250132258/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1250132258&amp;linkId=7fd7b3459a6dd1f0c9628a7bc768dbd5" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/01/gardner.jpg" alt="" class="wp-image-1153" width="167" height="250" /></a></div>



<ul><li>Alison’s <a href="http://alisongopnik.com/" target="_blank" rel="noreferrer noopener">Website.</a></li><li><a href="https://www.gopniklab.berkeley.edu/" target="_blank" rel="noreferrer noopener">Cognitive Development and Learning Lab.</a></li><li>Twitter: <a href="https://twitter.com/AlisonGopnik">@AlisonGopnik</a>.</li><li>Related papers:<ul><li><a href="https://royalsocietypublishing.org/doi/full/10.1098/rstb.2019.0502?af=R" target="_blank" rel="noreferrer noopener">Childhood as a solution to explore-exploit tensions.</a></li><li>The Aeon article about grandparents, children, and evolution: <a href="https://aeon.co/essays/why-childhood-and-old-age-are-key-to-our-human-capacities" target="_blank" rel="noreferrer noopener">Vulnerable Yet Vital.</a></li></ul></li><li>Books:<ul><li><a href="https://www.amazon.com/gp/product/1250132258/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1250132258&amp;linkId=7fd7b3459a6dd1f0c9628a7bc768dbd5" target="_blank" rel="noreferrer noopener">The Gardener and the Carpenter: What the New Science of Child Development Tells Us About the Relationship Between Parents and Children</a>.</li><li><a href="https://www.amazon.com/gp/product/0688177883/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0688177883&amp;linkId=b88921aefd653fabc296d067278d2db6" target="_blank" rel="noreferrer noopener">The Scientist in the Crib: What Early Learning Tells Us About the Mind</a>.</li><li><a href="https://www.amazon.com/gp/product/0312429843/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0312429843&amp;linkId=2403712bc2bc7d3070e12d3ee63e8cff" target="_blank" rel="noreferrer noopener">The Philosophical Baby: What Children’s Minds Tell Us About Truth, Love, and the Meaning of Life.</a></li></ul></li></ul>



<p>Take-home points:</p>



<ul><li>Children learn by imitation, and not just unthinking imitation. They pay attention to and evaluate the intentions of others and judge whether a person seems to be a reliable source of information. That is, they learn by sophisticated socially-constrained imitation.</li><li>Children build abstract causal models of the world. This allows them to simulate potential outcomes and test their actions against those simulations, accelerating learning.</li><li>Children keep their foot on the exploration pedal, actively learning by exploring a wide...</li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Alison and I discuss her work to accelerate learning and thus improve AI by studying how children learn, as Alan Turing suggested in his famous 1950 paper. The ways children learn are via imitation, by learning abstract causal models, and active learning by implementing a high exploration/exploitation ratio. We also discuss child consciousness, psychedelics, the concept of life history, the role of grandparents and elders, and lots more.







Alison’s Website.Cognitive Development and Learning Lab.Twitter: @AlisonGopnik.Related papers:Childhood as a solution to explore-exploit tensions.The Aeon article about grandparents, children, and evolution: Vulnerable Yet Vital.Books:The Gardener and the Carpenter: What the New Science of Child Development Tells Us About the Relationship Between Parents and Children.The Scientist in the Crib: What Early Learning Tells Us About the Mind.The Philosophical Baby: What Children’s Minds Tell Us About Truth, Love, and the Meaning of Life.



Take-home points:



Children learn by imitation, and not just unthinking imitation. They pay attention to and evaluate the intentions of others and judge whether a person seems to be a reliable source of information. That is, they learn by sophisticated socially-constrained imitation.Children build abstract causal models of the world. This allows them to simulate potential outcomes and test their actions against those simulations, accelerating learning.Children keep their foot on the exploration pedal, actively learning by exploring a wide...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 094 Alison Gopnik: Child-Inspired AI]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2021/01/art-gopnik-01.jpg" alt="" class="wp-image-1151" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="188" height="196" src="https://braininspired.co/wp-content/uploads/2021/01/1-AlisonGopnik.jpg" alt="" class="wp-image-1152" /></div>



<p>Alison and I discuss her work to accelerate learning and thus improve AI by studying how children learn, as Alan Turing suggested in his <a href="https://www.cs.toronto.edu/~frank/csc2501/Readings/R1_Turing/Turing-1950.pdf">famous 1950 paper.</a> The ways children learn are via imitation, by learning abstract causal models, and active learning by implementing a high exploration/exploitation ratio. We also discuss child consciousness, psychedelics, the concept of life history, the role of grandparents and elders, and lots more.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1250132258/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1250132258&amp;linkId=7fd7b3459a6dd1f0c9628a7bc768dbd5" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2021/01/gardner.jpg" alt="" class="wp-image-1153" width="167" height="250" /></a></div>



<ul><li>Alison’s <a href="http://alisongopnik.com/" target="_blank" rel="noreferrer noopener">Website.</a></li><li><a href="https://www.gopniklab.berkeley.edu/" target="_blank" rel="noreferrer noopener">Cognitive Development and Learning Lab.</a></li><li>Twitter: <a href="https://twitter.com/AlisonGopnik">@AlisonGopnik</a>.</li><li>Related papers:<ul><li><a href="https://royalsocietypublishing.org/doi/full/10.1098/rstb.2019.0502?af=R" target="_blank" rel="noreferrer noopener">Childhood as a solution to explore-exploit tensions.</a></li><li>The Aeon article about grandparents, children, and evolution: <a href="https://aeon.co/essays/why-childhood-and-old-age-are-key-to-our-human-capacities" target="_blank" rel="noreferrer noopener">Vulnerable Yet Vital.</a></li></ul></li><li>Books:<ul><li><a href="https://www.amazon.com/gp/product/1250132258/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1250132258&amp;linkId=7fd7b3459a6dd1f0c9628a7bc768dbd5" target="_blank" rel="noreferrer noopener">The Gardener and the Carpenter: What the New Science of Child Development Tells Us About the Relationship Between Parents and Children</a>.</li><li><a href="https://www.amazon.com/gp/product/0688177883/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0688177883&amp;linkId=b88921aefd653fabc296d067278d2db6" target="_blank" rel="noreferrer noopener">The Scientist in the Crib: What Early Learning Tells Us About the Mind</a>.</li><li><a href="https://www.amazon.com/gp/product/0312429843/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0312429843&amp;linkId=2403712bc2bc7d3070e12d3ee63e8cff" target="_blank" rel="noreferrer noopener">The Philosophical Baby: What Children’s Minds Tell Us About Truth, Love, and the Meaning of Life.</a></li></ul></li></ul>



<p>Take-home points:</p>



<ul><li>Children learn by imitation, and not just unthinking imitation. They pay attention to and evaluate the intentions of others and judge whether a person seems to be a reliable source of information. That is, they learn by sophisticated socially-constrained imitation.</li><li>Children build abstract causal models of the world. This allows them to simulate potential outcomes and test their actions against those simulations, accelerating learning.</li><li>Children keep their foot on the exploration pedal, actively learning by exploring a wide spectrum of actions to determine what works. As we age, our exploratory cognition decreases, and we begin to exploit more what we’ve learned.</li></ul>



<p>Timestamps<br />0:00 – Intro <br />4:40 – State of the field <br />13:30 – Importance of learning <br />20:12 – Turing’s suggestion <br />22:49 – Patience for one’s own ideas <br />28:53 – Learning via imitation <br />31:57 – Learning abstract causal models <br />41:42 – Life history <br />43:22 – Learning via exploration <br />56:19 – Explore-exploit dichotomy <br />58:32 – Synaptic pruning <br />1:00:19 – Breakthrough research in careers <br />1:04:31 – Role of elders <br />1:09:08 – Child consciousness <br />1:11:41 – Psychedelics as child-like brain <br />1:16:00 – Build consciousness into AI?</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/094-Alison-Gopnik.mp3" length="76343724"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Alison and I discuss her work to accelerate learning and thus improve AI by studying how children learn, as Alan Turing suggested in his famous 1950 paper. The ways children learn are via imitation, by learning abstract causal models, and active learning by implementing a high exploration/exploitation ratio. We also discuss child consciousness, psychedelics, the concept of life history, the role of grandparents and elders, and lots more.







Alison’s Website.Cognitive Development and Learning Lab.Twitter: @AlisonGopnik.Related papers:Childhood as a solution to explore-exploit tensions.The Aeon article about grandparents, children, and evolution: Vulnerable Yet Vital.Books:The Gardener and the Carpenter: What the New Science of Child Development Tells Us About the Relationship Between Parents and Children.The Scientist in the Crib: What Early Learning Tells Us About the Mind.The Philosophical Baby: What Children’s Minds Tell Us About Truth, Love, and the Meaning of Life.



Take-home points:



Children learn by imitation, and not just unthinking imitation. They pay attention to and evaluate the intentions of others and judge whether a person seems to be a reliable source of information. That is, they learn by sophisticated socially-constrained imitation.Children build abstract causal models of the world. This allows them to simulate potential outcomes and test their actions against those simulations, accelerating learning.Children keep their foot on the exploration pedal, actively learning by exploring a wide...]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-gopnik-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:19:13</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 093 Dileep George: Inference in Brain Microcircuits]]>
                </title>
                <pubDate>Tue, 29 Dec 2020 11:59:01 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-093-dileep-george-inference-in-brain-microcircuits</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-093-dileep-george-inference-in-brain-microcircuits</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/12/art-george-01.jpg" alt="" class="wp-image-1149" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Dileep and I discuss his theoretical account of how the thalamus and cortex work together to implement visual inference. We talked previously about his Recursive Cortical Network (RCN) approach to visual inference, which is a probabilistic graph model that <a href="https://braininspired.co/podcast/13/" target="_blank" rel="noreferrer noopener">can solve hard problems like CAPTCHAs</a>, and more recently we talked about using his RCNs with cloned units to <a href="https://braininspired.co/podcast/87/" target="_blank" rel="noreferrer noopener">account for cognitive maps</a> related to the hippocampus. On this episode, we walk through how RCNs can map onto thalamo-cortical circuits so a given cortical column can signal whether it believes some concept or feature is present in the world, based on bottom-up incoming sensory evidence, top-down attention, and lateral related features. We also briefly compare this bio-RCN version with Randy O’Reilly’s <a href="https://braininspired.co/podcast/88/" target="_blank" rel="noreferrer noopener">Deep Predictive Learning</a> account of thalamo-cortical circuitry. </p>



<div class="wp-block-image"><img width="212" height="212" src="https://braininspired.co/wp-content/uploads/2018/10/dileep-212x212.jpg" alt="" class="wp-image-578" /></div>



<ul><li><a href="https://www.vicarious.com/">Vicarious</a> website – Dileep's AGI robotics company.</li><li>Twitter: <a href="https://twitter.com/dileeplearning?lang=en">@dileeplearning</a></li><li>The papers we discuss or mention:<ul><li><a href="https://www.biorxiv.org/content/10.1101/2020.09.09.290601v1">A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model</a>.</li><li><a href="https://www.frontiersin.org/articles/10.3389/fncom.2020.554097/full">From CAPTCHA to Commonsense: How Brain Can Teach Us About Artificial Intelligence.</a></li></ul></li><li><a href="https://en.wikipedia.org/wiki/Graphical_model">Probabilistic graphical models</a>.</li><li><a href="https://en.wikipedia.org/wiki/Hierarchical_temporal_memory#Comparison_of_neuron_models">Hierarchical temporal memory</a>.</li></ul>



<p>Time Stamps:</p>



<p>0:00 – Intro <br />5:18 – Levels of abstraction <br />7:54 – AGI vs. AHI vs. AUI<br />12:18 – Ideas and failures in startups <br />16:51 – Thalamic cortical circuitry computation  <br />22:07 – Recursive cortical networks <br />23:34 – bio-RCN <br />27:48 – Cortical column as binary random variable <br />33:37 – Clonal neuron roles <br />39:23 – Processing cascade <br />41:10 – Thalamus <br />47:18 – Attention as explaining away <br />50:51 – Comparison with O’Reilly’s predictive coding framework <br />55:39 – Subjective contour effect <br />1:01:20 – Necker cube</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Dileep and I discuss his theoretical account of how the thalamus and cortex work together to implement visual inference. We talked previously about his Recursive Cortical Network (RCN) approach to visual inference, which is a probabilistic graph model that can solve hard problems like CAPTCHAs, and more recently we talked about using his RCNs with cloned units to account for cognitive maps related to the hippocampus. On this episode, we walk through how RCNs can map onto thalamo-cortical circuits so a given cortical column can signal whether it believes some concept or feature is present in the world, based on bottom-up incoming sensory evidence, top-down attention, and lateral related features. We also briefly compare this bio-RCN version with Randy O’Reilly’s Deep Predictive Learning account of thalamo-cortical circuitry. 







Vicarious website – Dileep's AGI robotics company.Twitter: @dileeplearningThe papers we discuss or mention:A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model.From CAPTCHA to Commonsense: How Brain Can Teach Us About Artificial Intelligence.Probabilistic graphical models.Hierarchical temporal memory.



Time Stamps:



0:00 – Intro 5:18 – Levels of abstraction 7:54 – AGI vs. AHI vs. AUI 12:18 – Ideas and failures in startups 16:51 – Thalamic cortical circuitry computation  22:07 – Recursive cortical networks 23:34 – bio-RCN 27:48 – Cortical column as binary random variable 33:37 – Clonal neuron roles 39:23 – Processing cascade 41:10 – Thalamus 47:18 – Attention as explaining away 50:51 – Comparison with O’Reilly’s predictive coding framework 55:39 – Subjective contour effect 1:01:20 – Necker cube
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 093 Dileep George: Inference in Brain Microcircuits]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/12/art-george-01.jpg" alt="" class="wp-image-1149" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<p>Dileep and I discuss his theoretical account of how the thalamus and cortex work together to implement visual inference. We talked previously about his Recursive Cortical Network (RCN) approach to visual inference, which is a probabilistic graph model that <a href="https://braininspired.co/podcast/13/" target="_blank" rel="noreferrer noopener">can solve hard problems like CAPTCHAs</a>, and more recently we talked about using his RCNs with cloned units to <a href="https://braininspired.co/podcast/87/" target="_blank" rel="noreferrer noopener">account for cognitive maps</a> related to the hippocampus. On this episode, we walk through how RCNs can map onto thalamo-cortical circuits so a given cortical column can signal whether it believes some concept or feature is present in the world, based on bottom-up incoming sensory evidence, top-down attention, and lateral related features. We also briefly compare this bio-RCN version with Randy O’Reilly’s <a href="https://braininspired.co/podcast/88/" target="_blank" rel="noreferrer noopener">Deep Predictive Learning</a> account of thalamo-cortical circuitry. </p>



<div class="wp-block-image"><img width="212" height="212" src="https://braininspired.co/wp-content/uploads/2018/10/dileep-212x212.jpg" alt="" class="wp-image-578" /></div>



<ul><li><a href="https://www.vicarious.com/">Vicarious</a> website – Dileep's AGI robotics company.</li><li>Twitter: <a href="https://twitter.com/dileeplearning?lang=en">@dileeplearning</a></li><li>The papers we discuss or mention:<ul><li><a href="https://www.biorxiv.org/content/10.1101/2020.09.09.290601v1">A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model</a>.</li><li><a href="https://www.frontiersin.org/articles/10.3389/fncom.2020.554097/full">From CAPTCHA to Commonsense: How Brain Can Teach Us About Artificial Intelligence.</a></li></ul></li><li><a href="https://en.wikipedia.org/wiki/Graphical_model">Probabilistic graphical models</a>.</li><li><a href="https://en.wikipedia.org/wiki/Hierarchical_temporal_memory#Comparison_of_neuron_models">Hierarchical temporal memory</a>.</li></ul>



<p>Time Stamps:</p>



<p>0:00 – Intro <br />5:18 – Levels of abstraction <br />7:54 – AGI vs. AHI vs. AUI<br />12:18 – Ideas and failures in startups <br />16:51 – Thalamic cortical circuitry computation  <br />22:07 – Recursive cortical networks <br />23:34 – bio-RCN <br />27:48 – Cortical column as binary random variable <br />33:37 – Clonal neuron roles <br />39:23 – Processing cascade <br />41:10 – Thalamus <br />47:18 – Attention as explaining away <br />50:51 – Comparison with O’Reilly’s predictive coding framework <br />55:39 – Subjective contour effect <br />1:01:20 – Necker cube</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/093-Dileep-George-public.mp3" length="64152263"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Dileep and I discuss his theoretical account of how the thalamus and cortex work together to implement visual inference. We talked previously about his Recursive Cortical Network (RCN) approach to visual inference, which is a probabilistic graph model that can solve hard problems like CAPTCHAs, and more recently we talked about using his RCNs with cloned units to account for cognitive maps related to the hippocampus. On this episode, we walk through how RCNs can map onto thalamo-cortical circuits so a given cortical column can signal whether it believes some concept or feature is present in the world, based on bottom-up incoming sensory evidence, top-down attention, and lateral related features. We also briefly compare this bio-RCN version with Randy O’Reilly’s Deep Predictive Learning account of thalamo-cortical circuitry. 







Vicarious website – Dileep's AGI robotics company.Twitter: @dileeplearningThe papers we discuss or mention:A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model.From CAPTCHA to Commonsense: How Brain Can Teach Us About Artificial Intelligence.Probabilistic graphical models.Hierarchical temporal memory.



Time Stamps:



0:00 – Intro 5:18 – Levels of abstraction 7:54 – AGI vs. AHI vs. AUI 12:18 – Ideas and failures in startups 16:51 – Thalamic cortical circuitry computation  22:07 – Recursive cortical networks 23:34 – bio-RCN 27:48 – Cortical column as binary random variable 33:37 – Clonal neuron roles 39:23 – Processing cascade 41:10 – Thalamus 47:18 – Attention as explaining away 50:51 – Comparison with O’Reilly’s predictive coding framework 55:39 – Subjective contour effect 1:01:20 – Necker cube
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-george-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:06:31</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 092 Russ Poldrack: Cognitive Ontologies]]>
                </title>
                <pubDate>Tue, 15 Dec 2020 11:45:27 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-092-russ-poldrack-cognitive-ontologies</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-092-russ-poldrack-cognitive-ontologies</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/12/art-poldrack-01.jpg" alt="" class="wp-image-1145" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/12/russhead.jpeg" alt="" class="wp-image-1146" width="175" height="175" /></div>



<p>Russ and I discuss cognitive ontologies - the "parts" of the mind and their relations - as an ongoing dilemma of how to map onto each other what we know about brains and what we know about minds. We talk about whether we have the right ontology now, how he uses both top-down and data-driven approaches to analyze and refine current ontologies, and how all this has affected his own thinking about minds. We also discuss some of the current  meta-science issues and challenges in neuroscience  and AI, and Russ answers guest questions from <a href="https://braininspired.co/podcast/26/">Kendrick Kay</a> and <a href="https://braininspired.co/podcast/84/">David Poeppel</a>.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/12/newmindreaders.jpg" alt="" class="wp-image-1147" width="166" height="250" /></a></div>



<ul><li><a href="http://www.russpoldrack.org/" target="_blank" rel="noreferrer noopener">Russ’s website</a>.</li><li><a href="https://poldracklab.stanford.edu/" target="_blank" rel="noreferrer noopener">Poldrack Lab</a>.</li><li><a href="https://reproducibility.stanford.edu/" target="_blank" rel="noreferrer noopener">Stanford Center For Reproducible Neuroscience.</a></li><li>Twitter: <a href="https://twitter.com/russpoldrack" target="_blank" rel="noreferrer noopener">@russpoldrack</a>.</li><li>Book:<ul><li><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6">The New Mind Readers: What Neuroimaging Can and </a><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6" target="_blank" rel="noreferrer noopener">Cannot</a><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6"> Reveal about Our Thoughts</a>.</li></ul></li><li>The papers we discuss or mention:<ul><li><a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006565">Atlases of cognition with large-scale human brain mapping</a>.</li><li><a href="https://pubmed.ncbi.nlm.nih.gov/25076977/">Mapping Mental Function to Brain Structure: How Can Cognitive Neuroimaging Succeed?</a></li><li><a href="http://pilab.psy.utexas.edu/publications/Poldrack_Annual_Reviews_2016.pdf">From Brain Maps to Cognitive Ontologies: Informatics and the Search for Mental Structure.</a></li><li><a href="https://www.nature.com/articles/s41467-019-10301-1">Uncovering the structure of self-regulation through data-driven ontology 
discovery</a></li></ul></li><li>Talks:<ul><li>Reproducibility: <a href="https://www.youtube.com/watch?v=E9vp1JyYGHY">NeuroHackademy: Russell Poldrack - Reproducibility in fMRI: What is the problem?</a></li><li>Cognitive Ontology: <a href="https://www.youtube.com/watch?v=2acb4YlT1V8">Cognitive Ontologies, from Top to Bottom</a></li><li>A good series of talks about cognitive ontologies: <a href="https://www.youtube.com/playlist?list=PLj4TQJQsJI1aVEyfOy91WybhKYcRSDxng">Online Seminar Series: Problem of Cognitive Ontology</a>.</li></ul></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Russ and I discuss cognitive ontologies - the "parts" of the mind and their relations - as an ongoing dilemma of how to map onto each other what we know about brains and what we know about minds. We talk about whether we have the right ontology now, how he uses both top-down and data-driven approaches to analyze and refine current ontologies, and how all this has affected his own thinking about minds. We also discuss some of the current  meta-science issues and challenges in neuroscience  and AI, and Russ answers guest questions from Kendrick Kay and David Poeppel.







Russ’s website.Poldrack Lab.Stanford Center For Reproducible Neuroscience.Twitter: @russpoldrack.Book:The New Mind Readers: What Neuroimaging Can and Cannot Reveal about Our Thoughts.The papers we discuss or mention:Atlases of cognition with large-scale human brain mapping.Mapping Mental Function to Brain Structure: How Can Cognitive Neuroimaging Succeed?From Brain Maps to Cognitive Ontologies: Informatics and the Search for Mental Structure.Uncovering the structure of self-regulation through data-driven ontology discoveryTalks:Reproducibility: NeuroHackademy: Russell Poldrack - Reproducibility in fMRI: What is the problem?Cognitive Ontology: Cognitive Ontologies, from Top to BottomA good series of talks about cognitive ontologies: Online Seminar Series: Problem of Cognitive Ontology.]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 092 Russ Poldrack: Cognitive Ontologies]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/12/art-poldrack-01.jpg" alt="" class="wp-image-1145" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/12/russhead.jpeg" alt="" class="wp-image-1146" width="175" height="175" /></div>



<p>Russ and I discuss cognitive ontologies - the "parts" of the mind and their relations - as an ongoing dilemma of how to map onto each other what we know about brains and what we know about minds. We talk about whether we have the right ontology now, how he uses both top-down and data-driven approaches to analyze and refine current ontologies, and how all this has affected his own thinking about minds. We also discuss some of the current  meta-science issues and challenges in neuroscience  and AI, and Russ answers guest questions from <a href="https://braininspired.co/podcast/26/">Kendrick Kay</a> and <a href="https://braininspired.co/podcast/84/">David Poeppel</a>.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/12/newmindreaders.jpg" alt="" class="wp-image-1147" width="166" height="250" /></a></div>



<ul><li><a href="http://www.russpoldrack.org/" target="_blank" rel="noreferrer noopener">Russ’s website</a>.</li><li><a href="https://poldracklab.stanford.edu/" target="_blank" rel="noreferrer noopener">Poldrack Lab</a>.</li><li><a href="https://reproducibility.stanford.edu/" target="_blank" rel="noreferrer noopener">Stanford Center For Reproducible Neuroscience.</a></li><li>Twitter: <a href="https://twitter.com/russpoldrack" target="_blank" rel="noreferrer noopener">@russpoldrack</a>.</li><li>Book:<ul><li><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6">The New Mind Readers: What Neuroimaging Can and </a><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6" target="_blank" rel="noreferrer noopener">Cannot</a><a href="https://www.amazon.com/gp/product/0691178615/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0691178615&amp;linkId=d5504f7983f2bf7314b761e51934dff6"> Reveal about Our Thoughts</a>.</li></ul></li><li>The papers we discuss or mention:<ul><li><a href="https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006565">Atlases of cognition with large-scale human brain mapping</a>.</li><li><a href="https://pubmed.ncbi.nlm.nih.gov/25076977/">Mapping Mental Function to Brain Structure: How Can Cognitive Neuroimaging Succeed?</a></li><li><a href="http://pilab.psy.utexas.edu/publications/Poldrack_Annual_Reviews_2016.pdf">From Brain Maps to Cognitive Ontologies: Informatics and the Search for Mental Structure.</a></li><li><a href="https://www.nature.com/articles/s41467-019-10301-1">Uncovering the structure of self-regulation through data-driven ontology 
discovery</a></li></ul></li><li>Talks:<ul><li>Reproducibility: <a href="https://www.youtube.com/watch?v=E9vp1JyYGHY">NeuroHackademy: Russell Poldrack - Reproducibility in fMRI: What is the problem?</a></li><li>Cognitive Ontology: <a href="https://www.youtube.com/watch?v=2acb4YlT1V8">Cognitive Ontologies, from Top to Bottom</a></li><li>A good series of talks about cognitive ontologies: <a href="https://www.youtube.com/playlist?list=PLj4TQJQsJI1aVEyfOy91WybhKYcRSDxng">Online Seminar Series: Problem of Cognitive Ontology</a>.</li></ul></li></ul>



<p><strong>Some take-home points:</strong></p>



<ul><li>Our folk psychological cognitive ontology hasn't changed much since early Greek Philosophy, and especially since William James wrote about attention, consciousness, and so on.</li><li>Using encoding models, we can predict brain responses pretty well based on what task a subject is performing or what "cognitive function" a subject is engaging, at least to a coarse approximation.</li><li>Using a data-driven approach has potential to help determine mental structure, but important human decisions must still be made regarding how exactly to divide up the various "parts" of the mind.</li></ul>



<p><strong>Time points</strong><br />0:00 - Introduction <br />5:59 - Meta-science issues <br />19:00 - Kendrick Kay question <br />23:00 - State of the field <br />30:06 - fMRI for understanding minds <br />35:13 - Computational mind <br />42:10 - Cognitive ontology <br />45:17 - Cognitive Atlas <br />52:05 - David Poeppel question <br />57:00 - Does ontology matter? <br />59:18 - Data-driven ontology <br />1:12:29 - Dynamical systems approach <br />1:16:25 - György Buzsáki's inside-out approach <br />1:22:26 - Ontology for AI <br />1:27:39 - Deep learning hype </p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/092-Russ-Poldrack.mp3" length="98413480"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Russ and I discuss cognitive ontologies - the "parts" of the mind and their relations - as an ongoing dilemma of how to map onto each other what we know about brains and what we know about minds. We talk about whether we have the right ontology now, how he uses both top-down and data-driven approaches to analyze and refine current ontologies, and how all this has affected his own thinking about minds. We also discuss some of the current  meta-science issues and challenges in neuroscience  and AI, and Russ answers guest questions from Kendrick Kay and David Poeppel.







Russ’s website.Poldrack Lab.Stanford Center For Reproducible Neuroscience.Twitter: @russpoldrack.Book:The New Mind Readers: What Neuroimaging Can and Cannot Reveal about Our Thoughts.The papers we discuss or mention:Atlases of cognition with large-scale human brain mapping.Mapping Mental Function to Brain Structure: How Can Cognitive Neuroimaging Succeed?From Brain Maps to Cognitive Ontologies: Informatics and the Search for Mental Structure.Uncovering the structure of self-regulation through data-driven ontology discoveryTalks:Reproducibility: NeuroHackademy: Russell Poldrack - Reproducibility in fMRI: What is the problem?Cognitive Ontology: Cognitive Ontologies, from Top to BottomA good series of talks about cognitive ontologies: Online Seminar Series: Problem of Cognitive Ontology.]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-poldrack-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:42:12</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 091 Carsen Stringer: Understanding 40,000 Neurons]]>
                </title>
                <pubDate>Thu, 03 Dec 2020 20:23:58 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-091-carsen-stringer-understanding-40000-neurons</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-091-carsen-stringer-understanding-40000-neurons</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/12/art-stringer-01-1.jpg" alt="" class="wp-image-1142" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="250" height="312" src="https://braininspired.co/wp-content/uploads/2020/12/carsen.jpg" alt="" class="wp-image-1141" /></div>



<p>Carsen and I discuss how she uses 2-photon calcium imaging data from over 10,000 neurons to understand the information processing of such large neural population activity. We talk about the tools she makes and uses to analyze the data, and the type of high-dimensional neural activity structure they found, which seems to allow efficient and robust information processing. We also talk about how these findings may help build better deep learning networks, and Carsen’s thoughts on how to improve the diversity, inclusivity, and equality in neuroscience research labs. Guest question from <a href="https://braininspired.co/podcast/89" target="_blank" rel="noreferrer noopener">Matt Smith</a>.</p>



<ul><li><a href="https://www.janelia.org/lab/stringer-lab" target="_blank" rel="noreferrer noopener">Stringer Lab</a>.</li><li>Twitter: <a href="https://twitter.com/computingnature?lang=en" target="_blank" rel="noreferrer noopener">@computingnature</a>.</li><li>The papers we discuss or mention:<ul><li><a href="https://www.biorxiv.org/content/10.1101/374090v2" target="_blank" rel="noreferrer noopener">High-dimensional geometry of population responses in visual cortex</a></li><li><a href="https://www.biorxiv.org/content/10.1101/306019v2" target="_blank" rel="noreferrer noopener">Spontaneous behaviors drive multidimensional, brain-wide population activity.</a></li></ul></li></ul>



<p>Timestamps:</p>



<p>0:00 – Intro <br />5:51 – Recording &gt; 10k neurons <br />8:51 – 2-photon calcium imaging <br />14:56 – Balancing scientific questions and tools <br />21:16 – Unsupervised learning tools and rastermap <br />26:14 – Manifolds <br />32:13 – Matt Smith question <br />37:06 – Dimensionality of neural activity <br />58:51 – Future plans <br />1:00:30- What can AI learn from this? <br />1:13:26 – Diversity, inclusivity, equality</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Carsen and I discuss how she uses 2-photon calcium imaging data from over 10,000 neurons to understand the information processing of such large neural population activity. We talk about the tools she makes and uses to analyze the data, and the type of high-dimensional neural activity structure they found, which seems to allow efficient and robust information processing. We also talk about how these findings may help build better deep learning networks, and Carsen’s thoughts on how to improve the diversity, inclusivity, and equality in neuroscience research labs. Guest question from Matt Smith.



Stringer Lab.Twitter: @computingnature.The papers we discuss or mention:High-dimensional geometry of population responses in visual cortexSpontaneous behaviors drive multidimensional, brain-wide population activity.



Timestamps:



0:00 – Intro 5:51 – Recording > 10k neurons 8:51 – 2-photon calcium imaging 14:56 – Balancing scientific questions and tools 21:16 – Unsupervised learning tools and rastermap 26:14 – Manifolds 32:13 – Matt Smith question 37:06 – Dimensionality of neural activity 58:51 – Future plans 1:00:30 - What can AI learn from this? 1:13:26 – Diversity, inclusivity, equality
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 091 Carsen Stringer: Understanding 40,000 Neurons]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/12/art-stringer-01-1.jpg" alt="" class="wp-image-1142" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img width="250" height="312" src="https://braininspired.co/wp-content/uploads/2020/12/carsen.jpg" alt="" class="wp-image-1141" /></div>



<p>Carsen and I discuss how she uses 2-photon calcium imaging data from over 10,000 neurons to understand the information processing of such large neural population activity. We talk about the tools she makes and uses to analyze the data, and the type of high-dimensional neural activity structure they found, which seems to allow efficient and robust information processing. We also talk about how these findings may help build better deep learning networks, and Carsen’s thoughts on how to improve the diversity, inclusivity, and equality in neuroscience research labs. Guest question from <a href="https://braininspired.co/podcast/89" target="_blank" rel="noreferrer noopener">Matt Smith</a>.</p>



<ul><li><a href="https://www.janelia.org/lab/stringer-lab" target="_blank" rel="noreferrer noopener">Stringer Lab</a>.</li><li>Twitter: <a href="https://twitter.com/computingnature?lang=en" target="_blank" rel="noreferrer noopener">@computingnature</a>.</li><li>The papers we discuss or mention:<ul><li><a href="https://www.biorxiv.org/content/10.1101/374090v2" target="_blank" rel="noreferrer noopener">High-dimensional geometry of population responses in visual cortex</a></li><li><a href="https://www.biorxiv.org/content/10.1101/306019v2" target="_blank" rel="noreferrer noopener">Spontaneous behaviors drive multidimensional, brain-wide population activity.</a></li></ul></li></ul>



<p>Timestamps:</p>



<p>0:00 – Intro <br />5:51 – Recording &gt; 10k neurons <br />8:51 – 2-photon calcium imaging <br />14:56 – Balancing scientific questions and tools <br />21:16 – Unsupervised learning tools and rastermap <br />26:14 – Manifolds <br />32:13 – Matt Smith question <br />37:06 – Dimensionality of neural activity <br />58:51 – Future plans <br />1:00:30- What can AI learn from this? <br />1:13:26 – Diversity, inclusivity, equality</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/091-Carsen-Stringer-Public.mp3" length="85082006"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Carsen and I discuss how she uses 2-photon calcium imaging data from over 10,000 neurons to understand the information processing of such large neural population activity. We talk about the tools she makes and uses to analyze the data, and the type of high-dimensional neural activity structure they found, which seems to allow efficient and robust information processing. We also talk about how these findings may help build better deep learning networks, and Carsen’s thoughts on how to improve the diversity, inclusivity, and equality in neuroscience research labs. Guest question from Matt Smith.



Stringer Lab.Twitter: @computingnature.The papers we discuss or mention:High-dimensional geometry of population responses in visual cortexSpontaneous behaviors drive multidimensional, brain-wide population activity.



Timestamps:



0:00 – Intro 5:51 – Recording > 10k neurons 8:51 – 2-photon calcium imaging 14:56 – Balancing scientific questions and tools 21:16 – Unsupervised learning tools and rastermap 26:14 – Manifolds 32:13 – Matt Smith question 37:06 – Dimensionality of neural activity 58:51 – Future plans 1:00:30 - What can AI learn from this? 1:13:26 – Diversity, inclusivity, equality
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-stringer-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:28:19</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 090 Chris Eliasmith: Building the Human Brain]]>
                </title>
                <pubDate>Mon, 23 Nov 2020 11:44:24 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-090-chris-eliasmith-building-the-human-brain</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-090-chris-eliasmith-building-the-human-brain</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/11/art-eliasmith-01-1.jpg" alt="" class="wp-image-1135" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/11/eliasmithhead.jpg" alt="" class="wp-image-1136" width="242" height="269" /></div>



<p>Chris and I discuss his Spaun large scale model of the human brain (Semantic Pointer Architecture Unified Network), as detailed in his book <a href="https://www.amazon.com/gp/product/0190262125/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190262125&amp;linkId=a1e669c8873aeb967dc53d078ce0ec65" target="_blank" rel="noreferrer noopener">How to Build a Brain.</a> We talk about his philosophical approach, how Spaun compares to Randy O'Reilly's Leabra networks, the Applied Brain Research Chris co-founded, and I have guest questions from <a href="https://braininspired.co/podcast/41/" target="_blank" rel="noreferrer noopener">Brad Aimone</a>, <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/" target="_blank" rel="noreferrer noopener">Steve Potter</a>, and <a href="https://braininspired.co/podcast/88/" target="_blank" rel="noreferrer noopener">Randy O'Reilly</a>.</p>



<ul><li><a href="http://compneuro.uwaterloo.ca/" target="_blank" rel="noreferrer noopener">Chris's website.</a></li><li><a href="https://appliedbrainresearch.com/" target="_blank" rel="noreferrer noopener">Applied Brain Research.</a></li><li>The book: <a href="https://www.amazon.com/gp/product/0190262125/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190262125&amp;linkId=a1e669c8873aeb967dc53d078ce0ec65" target="_blank" rel="noreferrer noopener">How to Build a Brain.</a></li><li><a href="https://www.nengo.ai/" target="_blank" rel="noreferrer noopener">Nengo</a> (you can run Spaun).</li><li>Paper summary of Spaun: <a href="https://www.science.org/doi/10.1126/science.1225266" target="_blank" rel="noreferrer noopener">A large-scale model of the functioning brain.</a></li></ul>



<img src="https://braininspired.co/wp-content/uploads/2020/11/htbab.jpg" alt="" class="wp-image-1137" width="263" height="374" />



<p>Some takeaways:</p>



<ul><li>Spaun is an embodied fully functional cognitive architecture with one eye for task instructions and an arm for responses.</li><li>Chris uses elements from symbolic, connectionist, and dynamical systems approaches in cognitive science.</li><li>The neural engineering framework (NEF) is how functions get instantiated in spiking neural networks.</li><li>The semantic pointer architecture (SPA) is how representations are stored and transformed - i.e. the symbolic-like cognitive processing.</li></ul>



<p>Time Points:</p>



<p>0:00 - Intro<br />2:29 - Sense of awe <br />6:20 - Large-scale models <br />9:24 - Descriptive pragmatism <br />15:43 - Asking better questions <br />22:48 - Brad Aimone question: Neural engineering framework <br />29:07 - Engineering to build vs. understand <br />32:12 - Why is AI world not interested in brains/minds?<br />37:09 - Steve Potter neuromorphics question <br />44:51 - Spaun <br />49:33 - Semantic Pointer Architecture <br />56:04 - Representations <br />58:21 - Randy O'Reilly question 1 <br />1:07:33 - Randy O'Reilly question 2<br />1:10:31 - Spaun vs. Leabra <br />1:32:43 - How would Chris start over? </p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Chris and I discuss his Spaun large scale model of the human brain (Semantic Pointer Architecture Unified Network), as detailed in his book How to Build a Brain. We talk about his philosophical approach, how Spaun compares to Randy O'Reilly's Leabra networks, the Applied Brain Research Chris co-founded, and I have guest questions from Brad Aimone, Steve Potter, and Randy O'Reilly.



Chris's website.Applied Brain Research.The book: How to Build a Brain.Nengo (you can run Spaun).Paper summary of Spaun: A large-scale model of the functioning brain.







Some takeaways:



Spaun is an embodied fully functional cognitive architecture with one eye for task instructions and an arm for responses.Chris uses elements from symbolic, connectionist, and dynamical systems approaches in cognitive science.The neural engineering framework (NEF) is how functions get instantiated in spiking neural networks.The semantic pointer architecture (SPA) is how representations are stored and transformed - i.e. the symbolic-like cognitive processing.



Time Points:



0:00 - Intro 2:29 - Sense of awe 6:20 - Large-scale models 9:24 - Descriptive pragmatism 15:43 - Asking better questions 22:48 - Brad Aimone question: Neural engineering framework 29:07 - Engineering to build vs. understand 32:12 - Why is AI world not interested in brains/minds? 37:09 - Steve Potter neuromorphics question 44:51 - Spaun 49:33 - Semantic Pointer Architecture 56:04 - Representations 58:21 - Randy O'Reilly question 1 1:07:33 - Randy O'Reilly question 2 1:10:31 - Spaun vs. Leabra 1:32:43 - How would Chris start over? 
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 090 Chris Eliasmith: Building the Human Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/11/art-eliasmith-01-1.jpg" alt="" class="wp-image-1135" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/11/eliasmithhead.jpg" alt="" class="wp-image-1136" width="242" height="269" /></div>



<p>Chris and I discuss his Spaun large scale model of the human brain (Semantic Pointer Architecture Unified Network), as detailed in his book <a href="https://www.amazon.com/gp/product/0190262125/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190262125&amp;linkId=a1e669c8873aeb967dc53d078ce0ec65" target="_blank" rel="noreferrer noopener">How to Build a Brain.</a> We talk about his philosophical approach, how Spaun compares to Randy O'Reilly's Leabra networks, the Applied Brain Research Chris co-founded, and I have guest questions from <a href="https://braininspired.co/podcast/41/" target="_blank" rel="noreferrer noopener">Brad Aimone</a>, <a href="https://braininspired.co/podcast/bi-001-steven-potter-brains-in-dishes/" target="_blank" rel="noreferrer noopener">Steve Potter</a>, and <a href="https://braininspired.co/podcast/88/" target="_blank" rel="noreferrer noopener">Randy O'Reilly</a>.</p>



<ul><li><a href="http://compneuro.uwaterloo.ca/" target="_blank" rel="noreferrer noopener">Chris's website.</a></li><li><a href="https://appliedbrainresearch.com/" target="_blank" rel="noreferrer noopener">Applied Brain Research.</a></li><li>The book: <a href="https://www.amazon.com/gp/product/0190262125/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190262125&amp;linkId=a1e669c8873aeb967dc53d078ce0ec65" target="_blank" rel="noreferrer noopener">How to Build a Brain.</a></li><li><a href="https://www.nengo.ai/" target="_blank" rel="noreferrer noopener">Nengo</a> (you can run Spaun).</li><li>Paper summary of Spaun: <a href="https://www.science.org/doi/10.1126/science.1225266" target="_blank" rel="noreferrer noopener">A large-scale model of the functioning brain.</a></li></ul>



<img src="https://braininspired.co/wp-content/uploads/2020/11/htbab.jpg" alt="" class="wp-image-1137" width="263" height="374" />



<p>Some takeaways:</p>



<ul><li>Spaun is an embodied fully functional cognitive architecture with one eye for task instructions and an arm for responses.</li><li>Chris uses elements from symbolic, connectionist, and dynamical systems approaches in cognitive science.</li><li>The neural engineering framework (NEF) is how functions get instantiated in spiking neural networks.</li><li>The semantic pointer architecture (SPA) is how representations are stored and transformed - i.e. the symbolic-like cognitive processing.</li></ul>



<p>Time Points:</p>



<p>0:00 - Intro<br />2:29 - Sense of awe <br />6:20 - Large-scale models <br />9:24 - Descriptive pragmatism <br />15:43 - Asking better questions <br />22:48 - Brad Aimone question: Neural engineering framework <br />29:07 - Engineering to build vs. understand <br />32:12 - Why is AI world not interested in brains/minds?<br />37:09 - Steve Potter neuromorphics question <br />44:51 - Spaun <br />49:33 - Semantic Pointer Architecture <br />56:04 - Representations <br />58:21 - Randy O'Reilly question 1 <br />1:07:33 - Randy O'Reilly question 2<br />1:10:31 - Spaun vs. Leabra <br />1:32:43 - How would Chris start over? </p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/090-Chris-Eliasmith-public.mp3" length="95293256"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Chris and I discuss his Spaun large scale model of the human brain (Semantic Pointer Architecture Unified Network), as detailed in his book How to Build a Brain. We talk about his philosophical approach, how Spaun compares to Randy O'Reilly's Leabra networks, the Applied Brain Research Chris co-founded, and I have guest questions from Brad Aimone, Steve Potter, and Randy O'Reilly.



Chris's website.Applied Brain Research.The book: How to Build a Brain.Nengo (you can run Spaun).Paper summary of Spaun: A large-scale model of the functioning brain.







Some takeaways:



Spaun is an embodied fully functional cognitive architecture with one eye for task instructions and an arm for responses.Chris uses elements from symbolic, connectionist, and dynamical systems approaches in cognitive science.The neural engineering framework (NEF) is how functions get instantiated in spiking neural networks.The semantic pointer architecture (SPA) is how representations are stored and transformed - i.e. the symbolic-like cognitive processing.



Time Points:



0:00 - Intro2:29 - Sense of awe 6:20 - Large-scale models 9:24 - Descriptive pragmatism 15:43 - Asking better questions 22:48 - Brad Aimone question: Neural engineering framework 29:07 - Engineering to build vs. understand 32:12 - Why is AI world not interested in brains/minds?37:09 - Steve Potter neuromorphics question 44:51 - Spaun 49:33 - Semantic Pointer Architecture 56:04 - Representations 58:21 - Randy O'Reilly question 1 1:07:33 - Randy O'Reilly question 21:10:31 - Spaun vs. Leabra 1:32:43 - How would Chris start over? 
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-eliasmith-01-1.jpg"></itunes:image>
                                                                            <itunes:duration>01:38:57</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 089 Matt Smith: Drifting Cognition]]>
                </title>
                <pubDate>Thu, 12 Nov 2020 15:54:33 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-089-matt-smith-drifting-cognition</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-089-matt-smith-drifting-cognition</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/11/art-smith-01.jpg" alt="" class="wp-image-1131" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/11/matt_photo-300x300-1.jpg" alt="" class="wp-image-1132" width="225" height="225" /></div>



<p>Matt and I discuss how cognition and behavior drifts over the course of minutes and hours, and how global brain activity drifts with it. How does the brain continue to produce steady perception and action in the midst of such drift? We also talk about how to think about variability in neural activity. How much of it is noise and how much of it is hidden important activity? Finally, we discuss the effect of recording more and more neurons simultaneously, collecting bigger and bigger datasets, plus guest questions from Adam Snyder and <a href="https://braininspired.co/podcast/71/">Patrick Mayo</a>.</p>



<ul><li><a href="https://smithlab.net/">Smith Lab</a>.</li><li>Twitter: <a href="https://twitter.com/SmithLabNeuro">@SmithLabNeuro</a>.</li><li>Related:<ul><li><a href="https://doi.org/10.1016/j.neuron.2020.07.021">Slow drift of neural activity as a signature of impulsivity in macaque visual and prefrontal cortex.</a></li></ul></li><li>Artwork by Melissa Neely</li></ul>



<p><strong>Take home points:</strong></p>



<ul><li>The “noise” in the variability of neural activity is likely just activity devoted to processing other things.</li><li>Recording lots of neurons simultaneously helps resolve the question of what’s noise and how much information is in a population of neurons.</li><li>There’s a neural signature of the behavioral “slow drift” of our internal cognitive state.</li><li>The neural signature is global, and it’s an open question how the brain compensates to produce steady perception and action.</li></ul>



<p><strong>Timestamps:</strong></p>



<p>0:00 - Intro <br />4:35 - Adam Snyder question  <br />15:26 - Multi-electrode recordings  <br />17:48 - What is noise in the brain?  <br />23:55 - How many neurons is enough?  <br />27:43 - Patrick Mayo question  <br />33:17 - Slow drift  <br />54:10 - Impulsivity  <br />57:32 - How does drift happen?  <br />59:49 - Relation to AI  <br />1:06:58 - What AI and neuro can teach each other  <br />1:10:02 - Ecologically valid behavior  <br />1:14:39 - Brain mechanisms vs. mind  <br />1:17:36 - Levels of description  <br />1:21:14 - Hard things to make in AI  <br />1:22:48 - Best scientific moment </p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Matt and I discuss how cognition and behavior drifts over the course of minutes and hours, and how global brain activity drifts with it. How does the brain continue to produce steady perception and action in the midst of such drift? We also talk about how to think about variability in neural activity. How much of it is noise and how much of it is hidden important activity? Finally, we discuss the effect of recording more and more neurons simultaneously, collecting bigger and bigger datasets, plus guest questions from Adam Snyder and Patrick Mayo.



Smith Lab.Twitter: @SmithLabNeuro.Related:Slow drift of neural activity as a signature of impulsivity in macaque visual and prefrontal cortex.Artwork by Melissa Neely



Take home points:



The “noise” in the variability of neural activity is likely just activity devoted to processing other things.Recording lots of neurons simultaneously helps resolve the question of what’s noise and how much information is in a population of neurons.There’s a neural signature of the behavioral “slow drift” of our internal cognitive state.The neural signature is global, and it’s an open question how the brain compensates to produce steady perception and action.



Timestamps:



0:00 - Intro 4:35 - Adam Snyder question  15:26 - Multi-electrode recordings  17:48 - What is noise in the brain?  23:55 - How many neurons is enough?  27:43 - Patrick Mayo question  33:17 - Slow drift  54:10 - Impulsivity  57:32 - How does drift happen?  59:49 - Relation to AI  1:06:58 - What AI and neuro can teach each other  1:10:02 - Ecologically valid behavior  1:14:39 - Brain mechanisms vs. mind  1:17:36 - Levels of description  1:21:14 - Hard things to make in AI  1:22:48 - Best scientific moment 
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 089 Matt Smith: Drifting Cognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/11/art-smith-01.jpg" alt="" class="wp-image-1131" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/11/matt_photo-300x300-1.jpg" alt="" class="wp-image-1132" width="225" height="225" /></div>



<p>Matt and I discuss how cognition and behavior drifts over the course of minutes and hours, and how global brain activity drifts with it. How does the brain continue to produce steady perception and action in the midst of such drift? We also talk about how to think about variability in neural activity. How much of it is noise and how much of it is hidden important activity? Finally, we discuss the effect of recording more and more neurons simultaneously, collecting bigger and bigger datasets, plus guest questions from Adam Snyder and <a href="https://braininspired.co/podcast/71/">Patrick Mayo</a>.</p>



<ul><li><a href="https://smithlab.net/">Smith Lab</a>.</li><li>Twitter: <a href="https://twitter.com/SmithLabNeuro">@SmithLabNeuro</a>.</li><li>Related:<ul><li><a href="https://doi.org/10.1016/j.neuron.2020.07.021">Slow drift of neural activity as a signature of impulsivity in macaque visual and prefrontal cortex.</a></li></ul></li><li>Artwork by Melissa Neely</li></ul>



<p><strong>Take home points:</strong></p>



<ul><li>The “noise” in the variability of neural activity is likely just activity devoted to processing other things.</li><li>Recording lots of neurons simultaneously helps resolve the question of what’s noise and how much information is in a population of neurons.</li><li>There’s a neural signature of the behavioral “slow drift” of our internal cognitive state.</li><li>The neural signature is global, and it’s an open question how the brain compensates to produce steady perception and action.</li></ul>



<p><strong>Timestamps:</strong></p>



<p>0:00 - Intro <br />4:35 - Adam Snyder question  <br />15:26 - Multi-electrode recordings  <br />17:48 - What is noise in the brain?  <br />23:55 - How many neurons is enough?  <br />27:43 - Patrick Mayo question  <br />33:17 - Slow drift  <br />54:10 - Impulsivity  <br />57:32 - How does drift happen?  <br />59:49 - Relation to AI  <br />1:06:58 - What AI and neuro can teach each other  <br />1:10:02 - Ecologically valid behavior  <br />1:14:39 - Brain mechanisms vs. mind  <br />1:17:36 - Levels of description  <br />1:21:14 - Hard things to make in AI  <br />1:22:48 - Best scientific moment </p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/089-Matt-Smith-Public.mp3" length="83699789"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Matt and I discuss how cognition and behavior drifts over the course of minutes and hours, and how global brain activity drifts with it. How does the brain continue to produce steady perception and action in the midst of such drift? We also talk about how to think about variability in neural activity. How much of it is noise and how much of it is hidden important activity? Finally, we discuss the effect of recording more and more neurons simultaneously, collecting bigger and bigger datasets, plus guest questions from Adam Snyder and Patrick Mayo.



Smith Lab.Twitter: @SmithLabNeuro.Related:Slow drift of neural activity as a signature of impulsivity in macaque visual and prefrontal cortex.Artwork by Melissa Neely



Take home points:



The “noise” in the variability of neural activity is likely just activity devoted to processing other things.Recording lots of neurons simultaneously helps resolve the question of what’s noise and how much information is in a population of neurons.There’s a neural signature of the behavioral “slow drift” of our internal cognitive state.The neural signature is global, and it’s an open question how the brain compensates to produce steady perception and action.



Timestamps:



0:00 - Intro 4:35 - Adam Snyder question  15:26 - Multi-electrode recordings  17:48 - What is noise in the brain?  23:55 - How many neurons is enough?  27:43 - Patrick Mayo question  33:17 - Slow drift  54:10 - Impulsivity  57:32 - How does drift happen?  59:49 - Relation to AI  1:06:58 - What AI and neuro can teach each other  1:10:02 - Ecologically valid behavior  1:14:39 - Brain mechanisms vs. mind  1:17:36 - Levels of description  1:21:14 - Hard things to make in AI  1:22:48 - Best scientific moment 
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-smith-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:26:52</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 088 Randy O'Reilly: Simulating the Human Brain]]>
                </title>
                <pubDate>Mon, 02 Nov 2020 16:30:37 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-088-randy-oreilly-simulating-the-human-brain</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-088-randy-oreilly-simulating-the-human-brain</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/11/art-oreilly-01.jpg" alt="" class="wp-image-1126" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/11/randy-head.jpeg" alt="" class="wp-image-1127" width="150" height="225" /></div>



<p>Randy and I discuss his LEABRA cognitive architecture that aims to simulate the human brain, plus his current theory about how a loop between cortical regions and the thalamus could implement predictive learning and thus solve how we learn with so few examples. We also discuss what Randy thinks is the next big thing neuroscience can contribute to AI (thanks to a guest question from <a href="https://braininspired.co/podcast/43/" target="_blank" rel="noreferrer noopener">Anna Schapiro</a>), and much more. </p>



<ul><li><a href="https://ccnlab.netlify.app/index.html">Computational Cognitive Neuroscience Laboratory.</a></li><li>The papers we discuss or mention:<ul><li><a href="https://ccnlab.org/papers/OReillyHazyHerd16.pdf">The Leabra Cognitive Architecture: How to Play 20 Principles with Nature and Win!</a></li><li><a href="https://ccnlab.org/papers/OReillyRussinZolfagharEtAl20.pdf">Deep Predictive Learning in Neocortex and Pulvinar</a>.</li><li><a href="https://ccnlab.org/papers/OReilly20.pdf">Unraveling the Mysteries of Motivation</a>.</li></ul></li><li>His youTube series detailing the theory and workings of Leabra:<ul><li><a href="https://www.youtube.com/playlist?list=PLu02O8xRZn7xtNx03Rlq6xMRdYcQgEpar">Computational Cognitive</a><a href="https://www.youtube.com/playlist?list=PLu02O8xRZn7xtNx03Rlq6xMRdYcQgEpar" target="_blank" rel="noreferrer noopener"> </a><a href="https://www.youtube.com/playlist?list=PLu02O8xRZn7xtNx03Rlq6xMRdYcQgEpar">Neuroscience</a>.</li></ul></li><li>The free textbook:<ul><li><a href="https://compcogneuro.org/" target="_blank" rel="noreferrer noopener">Computational Cognitive Neuroscience</a></li></ul></li></ul>



<p>A few take-home points:</p>



<ul><li>Leabra has been a slow incremental project, inspired in part by Alan Newell’s suggested approach.</li><li>Randy began by developing a learning algorithm that incorporated both kinds of biological learning (error-driven and associative).</li><li>Leabra’s core is 3 brain areas – frontal cortex, parietal cortex, and hippocampus – and has grown from there.</li><li>There’s a constant balance between biological realism and computational feasibility.</li><li>It’s important that a cognitive architecture address multiple levels- micro-scale, macro-scale, mechanisms, functions, and so on.</li><li>Deep predictive learning is a possible brain mechanism whereby predictions from higher layer cortex precede input from lower layer cortex in the thalamus, where an error is computed and used to drive learning.</li><li>Randy believes our metacognitive ability to know what we do and don’t know is a key next function to build into AI.</li></ul>



<p>Timestamps:<br /><br />0:00 –  Intro  <br />3:54 – Skip Intro  <br />6:20 – Being in awe  <br />18:57 – How current AI can inform neuro  <br />21:56 – Anna Schapiro question – how current neuro can inform AI.<br />29:20 – Learned vs. innate cognition  <br />33:43 – LEABRA  <br />38:33 – Developing Leabra  <br />40:30 – Macroscale <br />42:33 – Thalamus as microscale  <br />43:22 – Thalamocortical circuitry  <br />47:25 – Deep predictive learning  <br />56:18 – Deep predictive learning vs. backprop  <br />1:01:56 – 10 Hz learning cycle  <br />1:04:58 – Better theory vs. more data  <br />1:08:59 – Leabra vs. Spaun  <br />1:13:59 – Biological realism  <br />1:21:54 – Bottom-up inspiration  <br />1:27:26 – Biggest mistake in Leabra  <br />1:32:14 – AI consciousness  <br />1:34:45 – How would Randy begin again? </p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Randy and I discuss his LEABRA cognitive architecture that aims to simulate the human brain, plus his current theory about how a loop between cortical regions and the thalamus could implement predictive learning and thus solve how we learn with so few examples. We also discuss what Randy thinks is the next big thing neuroscience can contribute to AI (thanks to a guest question from Anna Schapiro), and much more. 



Computational Cognitive Neuroscience Laboratory.The papers we discuss or mention:The Leabra Cognitive Architecture: How to Play 20 Principles with Nature and Win!Deep Predictive Learning in Neocortex and Pulvinar.Unraveling the Mysteries of Motivation.His youTube series detailing the theory and workings of Leabra:Computational Cognitive Neuroscience.The free textbook:Computational Cognitive Neuroscience



A few take-home points:



Leabra has been a slow incremental project, inspired in part by Alan Newell’s suggested approach.Randy began by developing a learning algorithm that incorporated both kinds of biological learning (error-driven and associative).Leabra’s core is 3 brain areas – frontal cortex, parietal cortex, and hippocampus – and has grown from there.There’s a constant balance between biological realism and computational feasibility.It’s important that a cognitive architecture address multiple levels- micro-scale, macro-scale, mechanisms, functions, and so on.Deep predictive learning is a possible brain mechanism whereby predictions from higher layer cortex precede input from lower layer cortex in the thalamus, where an error is computed and used to drive learning.Randy believes our metacognitive ability to know what we do and don’t know is a key next function to build into AI.



Timestamps:0:00 –  Intro  3:54 – Skip Intro  6:20 – Being in awe  18:57 – How current AI can inform neuro  21:56 – Anna Schapiro question – how current neuro can inform AI.29:20 – Learned vs. innate cognition  33:43 – LEABRA  38:33 – Developing Leabra  40:30 – Macroscale 42:33 – Thalamus as microscale  43:22 – Thalamocortical circuitry  47:25 – Deep predictive learning  56:18 – Deep predictive learning vs. backprop  1:01:56 – 10 Hz learning cycle  1:04:58 – Better theory vs. more data  1:08:59 – Leabra vs. Spaun  1:13:59 – Biological realism  1:21:54 – Bottom-up inspiration  1:27:26 – Biggest mistake in Leabra  1:32:14 – AI consciousness  1:34:45 – How would Randy begin again? 
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 088 Randy O'Reilly: Simulating the Human Brain]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/11/art-oreilly-01.jpg" alt="" class="wp-image-1126" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/11/randy-head.jpeg" alt="" class="wp-image-1127" width="150" height="225" /></div>



<p>Randy and I discuss his LEABRA cognitive architecture that aims to simulate the human brain, plus his current theory about how a loop between cortical regions and the thalamus could implement predictive learning and thus solve how we learn with so few examples. We also discuss what Randy thinks is the next big thing neuroscience can contribute to AI (thanks to a guest question from <a href="https://braininspired.co/podcast/43/" target="_blank" rel="noreferrer noopener">Anna Schapiro</a>), and much more. </p>



<ul><li><a href="https://ccnlab.netlify.app/index.html">Computational Cognitive Neuroscience Laboratory.</a></li><li>The papers we discuss or mention:<ul><li><a href="https://ccnlab.org/papers/OReillyHazyHerd16.pdf">The Leabra Cognitive Architecture: How to Play 20 Principles with Nature and Win!</a></li><li><a href="https://ccnlab.org/papers/OReillyRussinZolfagharEtAl20.pdf">Deep Predictive Learning in Neocortex and Pulvinar</a>.</li><li><a href="https://ccnlab.org/papers/OReilly20.pdf">Unraveling the Mysteries of Motivation</a>.</li></ul></li><li>His youTube series detailing the theory and workings of Leabra:<ul><li><a href="https://www.youtube.com/playlist?list=PLu02O8xRZn7xtNx03Rlq6xMRdYcQgEpar">Computational Cognitive</a><a href="https://www.youtube.com/playlist?list=PLu02O8xRZn7xtNx03Rlq6xMRdYcQgEpar" target="_blank" rel="noreferrer noopener"> </a><a href="https://www.youtube.com/playlist?list=PLu02O8xRZn7xtNx03Rlq6xMRdYcQgEpar">Neuroscience</a>.</li></ul></li><li>The free textbook:<ul><li><a href="https://compcogneuro.org/" target="_blank" rel="noreferrer noopener">Computational Cognitive Neuroscience</a></li></ul></li></ul>



<p>A few take-home points:</p>



<ul><li>Leabra has been a slow incremental project, inspired in part by Alan Newell’s suggested approach.</li><li>Randy began by developing a learning algorithm that incorporated both kinds of biological learning (error-driven and associative).</li><li>Leabra’s core is 3 brain areas – frontal cortex, parietal cortex, and hippocampus – and has grown from there.</li><li>There’s a constant balance between biological realism and computational feasibility.</li><li>It’s important that a cognitive architecture address multiple levels- micro-scale, macro-scale, mechanisms, functions, and so on.</li><li>Deep predictive learning is a possible brain mechanism whereby predictions from higher layer cortex precede input from lower layer cortex in the thalamus, where an error is computed and used to drive learning.</li><li>Randy believes our metacognitive ability to know what we do and don’t know is a key next function to build into AI.</li></ul>



<p>Timestamps:<br /><br />0:00 –  Intro  <br />3:54 – Skip Intro  <br />6:20 – Being in awe  <br />18:57 – How current AI can inform neuro  <br />21:56 – Anna Schapiro question – how current neuro can inform AI.<br />29:20 – Learned vs. innate cognition  <br />33:43 – LEABRA  <br />38:33 – Developing Leabra  <br />40:30 – Macroscale <br />42:33 – Thalamus as microscale  <br />43:22 – Thalamocortical circuitry  <br />47:25 – Deep predictive learning  <br />56:18 – Deep predictive learning vs. backprop  <br />1:01:56 – 10 Hz learning cycle  <br />1:04:58 – Better theory vs. more data  <br />1:08:59 – Leabra vs. Spaun  <br />1:13:59 – Biological realism  <br />1:21:54 – Bottom-up inspiration  <br />1:27:26 – Biggest mistake in Leabra  <br />1:32:14 – AI consciousness  <br />1:34:45 – How would Randy begin again? </p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/088-Randy-O-Reilly-public.mp3" length="95478744"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Randy and I discuss his LEABRA cognitive architecture that aims to simulate the human brain, plus his current theory about how a loop between cortical regions and the thalamus could implement predictive learning and thus solve how we learn with so few examples. We also discuss what Randy thinks is the next big thing neuroscience can contribute to AI (thanks to a guest question from Anna Schapiro), and much more. 



Computational Cognitive Neuroscience Laboratory.The papers we discuss or mention:The Leabra Cognitive Architecture: How to Play 20 Principles with Nature and Win!Deep Predictive Learning in Neocortex and Pulvinar.Unraveling the Mysteries of Motivation.His youTube series detailing the theory and workings of Leabra:Computational Cognitive Neuroscience.The free textbook:Computational Cognitive Neuroscience



A few take-home points:



Leabra has been a slow incremental project, inspired in part by Alan Newell’s suggested approach.Randy began by developing a learning algorithm that incorporated both kinds of biological learning (error-driven and associative).Leabra’s core is 3 brain areas – frontal cortex, parietal cortex, and hippocampus – and has grown from there.There’s a constant balance between biological realism and computational feasibility.It’s important that a cognitive architecture address multiple levels- micro-scale, macro-scale, mechanisms, functions, and so on.Deep predictive learning is a possible brain mechanism whereby predictions from higher layer cortex precede input from lower layer cortex in the thalamus, where an error is computed and used to drive learning.Randy believes our metacognitive ability to know what we do and don’t know is a key next function to build into AI.



Timestamps:0:00 –  Intro  3:54 – Skip Intro  6:20 – Being in awe  18:57 – How current AI can inform neuro  21:56 – Anna Schapiro question – how current neuro can inform AI.29:20 – Learned vs. innate cognition  33:43 – LEABRA  38:33 – Developing Leabra  40:30 – Macroscale 42:33 – Thalamus as microscale  43:22 – Thalamocortical circuitry  47:25 – Deep predictive learning  56:18 – Deep predictive learning vs. backprop  1:01:56 – 10 Hz learning cycle  1:04:58 – Better theory vs. more data  1:08:59 – Leabra vs. Spaun  1:13:59 – Biological realism  1:21:54 – Bottom-up inspiration  1:27:26 – Biggest mistake in Leabra  1:32:14 – AI consciousness  1:34:45 – How would Randy begin again? 
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-oreilly-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:39:08</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 087 Dileep George: Cloning for Cognitive Maps]]>
                </title>
                <pubDate>Fri, 23 Oct 2020 13:38:19 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-087-dileep-george-cloning-for-cognitive-maps</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-087-dileep-george-cloning-for-cognitive-maps</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/10/art-george-1-01.jpg" alt="" class="wp-image-1123" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/dileep-212x212.jpg" alt="" class="wp-image-578" /></div>



<p>When a waiter hands me the bill, how do I know whether to pay it myself or let my date pay? On this episode, I get a progress update from Dileep on his company, Vicarious, since <a href="https://braininspired.co/podcast/13/" target="_blank" rel="noreferrer noopener">Dileep's last episode</a>. We also talk broadly about his experience running Vicarious to develop AGI and robotics. Then we turn to his latest brain-inspired AI efforts using cloned structured probabilistic graph models to develop an account of how the hippocampus makes a model of the world and represents our cognitive maps in different contexts, so we can simulate possible outcomes to choose how to act.</p>



<p>Special guest questions from <a href="https://braininspired.co/podcast/70/" target="_blank" rel="noreferrer noopener">Brad Love (episode 70: How We Learn Concepts)</a> .</p>



<ul><li><a href="https://www.vicarious.com/">Vicarious</a> website - Dileep's AGI robotics company.</li><li>Twitter: <a href="https://twitter.com/dileeplearning?lang=en">@dileeplearning</a>.</li><li>Papers we discuss:<ul><li><a href="https://www.biorxiv.org/content/10.1101/864421v4">Learning cognitive maps as structured graphs for vicarious evaluation</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.09.09.290601v1">A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model</a>.</li></ul></li><li><a href="https://en.wikipedia.org/wiki/Graphical_model">Probabilistic graphical models</a>.</li><li><a href="https://en.wikipedia.org/wiki/Hierarchical_temporal_memory#Comparison_of_neuron_models">Hierarchical temporal memory</a>.</li></ul>



<p>Time stamps:</p>



<p>0:00 - Intro<br />3:00 - Skip Intro<br />4:00 - Previous Dileep episode<br />10:22 - Is brain-inspired AI over-hyped?<br />14:38 - Competition in robotics field<br />15:53 - Vicarious robotics<br />22:12 - Choosing what product to make<br />28:13 - Running a startup<br />30:52 - Old brain vs. new brain<br />37:53 - Learning cognitive maps as structured graphs<br />41:59 - Graphical models<br />47:10 - Cloning and merging, hippocampus<br />53:36 - Brad Love Question 1<br />1:00:39 - Brad Love Question 2<br />1:02:41 - Task examples<br />1:11:56 - What does hippocampus do?<br />1:14:14 - Intro to thalamic cortical microcircuit<br />1:15:21 - What AI folks think of brains<br />1:16:57 - Which levels inform which levels<br />1:20:02 - Advice for an AI startup</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












When a waiter hands me the bill, how do I know whether to pay it myself or let my date pay? On this episode, I get a progress update from Dileep on his company, Vicarious, since Dileep's last episode. We also talk broadly about his experience running Vicarious to develop AGI and robotics. Then we turn to his latest brain-inspired AI efforts using cloned structured probabilistic graph models to develop an account of how the hippocampus makes a model of the world and represents our cognitive maps in different contexts, so we can simulate possible outcomes to choose how to act.



Special guest questions from Brad Love (episode 70: How We Learn Concepts) .



Vicarious website - Dileep's AGI robotics company.Twitter: @dileeplearning.Papers we discuss:Learning cognitive maps as structured graphs for vicarious evaluation.A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model.Probabilistic graphical models.Hierarchical temporal memory.



Time stamps:



0:00 - Intro3:00 - Skip Intro4:00 - Previous Dileep episode10:22 - Is brain-inspired AI over-hyped?14:38 - Competition in robotics field15:53 - Vicarious robotics22:12 - Choosing what product to make28:13 - Running a startup30:52 - Old brain vs. new brain37:53 - Learning cognitive maps as structured graphs41:59 - Graphical models47:10 - Cloning and merging, hippocampus53:36 - Brad Love Question 11:00:39 - Brad Love Question 21:02:41 - Task examples1:11:56 - What does hippocampus do?1:14:14 - Intro to thalamic cortical microcircuit1:15:21 - What AI folks think of brains1:16:57 - Which levels inform which levels1:20:02 - Advice for an AI startup
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 087 Dileep George: Cloning for Cognitive Maps]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/10/art-george-1-01.jpg" alt="" class="wp-image-1123" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/dileep-212x212.jpg" alt="" class="wp-image-578" /></div>



<p>When a waiter hands me the bill, how do I know whether to pay it myself or let my date pay? On this episode, I get a progress update from Dileep on his company, Vicarious, since <a href="https://braininspired.co/podcast/13/" target="_blank" rel="noreferrer noopener">Dileep's last episode</a>. We also talk broadly about his experience running Vicarious to develop AGI and robotics. Then we turn to his latest brain-inspired AI efforts using cloned structured probabilistic graph models to develop an account of how the hippocampus makes a model of the world and represents our cognitive maps in different contexts, so we can simulate possible outcomes to choose how to act.</p>



<p>Special guest questions from <a href="https://braininspired.co/podcast/70/" target="_blank" rel="noreferrer noopener">Brad Love (episode 70: How We Learn Concepts)</a> .</p>



<ul><li><a href="https://www.vicarious.com/">Vicarious</a> website - Dileep's AGI robotics company.</li><li>Twitter: <a href="https://twitter.com/dileeplearning?lang=en">@dileeplearning</a>.</li><li>Papers we discuss:<ul><li><a href="https://www.biorxiv.org/content/10.1101/864421v4">Learning cognitive maps as structured graphs for vicarious evaluation</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.09.09.290601v1">A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model</a>.</li></ul></li><li><a href="https://en.wikipedia.org/wiki/Graphical_model">Probabilistic graphical models</a>.</li><li><a href="https://en.wikipedia.org/wiki/Hierarchical_temporal_memory#Comparison_of_neuron_models">Hierarchical temporal memory</a>.</li></ul>



<p>Time stamps:</p>



<p>0:00 - Intro<br />3:00 - Skip Intro<br />4:00 - Previous Dileep episode<br />10:22 - Is brain-inspired AI over-hyped?<br />14:38 - Competition in robotics field<br />15:53 - Vicarious robotics<br />22:12 - Choosing what product to make<br />28:13 - Running a startup<br />30:52 - Old brain vs. new brain<br />37:53 - Learning cognitive maps as structured graphs<br />41:59 - Graphical models<br />47:10 - Cloning and merging, hippocampus<br />53:36 - Brad Love Question 1<br />1:00:39 - Brad Love Question 2<br />1:02:41 - Task examples<br />1:11:56 - What does hippocampus do?<br />1:14:14 - Intro to thalamic cortical microcircuit<br />1:15:21 - What AI folks think of brains<br />1:16:57 - Which levels inform which levels<br />1:20:02 - Advice for an AI startup</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/087-Dileep-George-public.mp3" length="79981688"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












When a waiter hands me the bill, how do I know whether to pay it myself or let my date pay? On this episode, I get a progress update from Dileep on his company, Vicarious, since Dileep's last episode. We also talk broadly about his experience running Vicarious to develop AGI and robotics. Then we turn to his latest brain-inspired AI efforts using cloned structured probabilistic graph models to develop an account of how the hippocampus makes a model of the world and represents our cognitive maps in different contexts, so we can simulate possible outcomes to choose how to act.



Special guest questions from Brad Love (episode 70: How We Learn Concepts) .



Vicarious website - Dileep's AGI robotics company.Twitter: @dileeplearning.Papers we discuss:Learning cognitive maps as structured graphs for vicarious evaluation.A detailed mathematical theory of thalamic and cortical microcircuits based on inference in a generative vision model.Probabilistic graphical models.Hierarchical temporal memory.



Time stamps:



0:00 - Intro3:00 - Skip Intro4:00 - Previous Dileep episode10:22 - Is brain-inspired AI over-hyped?14:38 - Competition in robotics field15:53 - Vicarious robotics22:12 - Choosing what product to make28:13 - Running a startup30:52 - Old brain vs. new brain37:53 - Learning cognitive maps as structured graphs41:59 - Graphical models47:10 - Cloning and merging, hippocampus53:36 - Brad Love Question 11:00:39 - Brad Love Question 21:02:41 - Task examples1:11:56 - What does hippocampus do?1:14:14 - Intro to thalamic cortical microcircuit1:15:21 - What AI folks think of brains1:16:57 - Which levels inform which levels1:20:02 - Advice for an AI startup
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-george-1-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:23:00</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 086 Ken Stanley: Open-Endedness]]>
                </title>
                <pubDate>Mon, 12 Oct 2020 08:07:09 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-086-ken-stanley-open-endedness</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-086-ken-stanley-open-endedness</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/10/art-stanley-01-1.jpg" alt="" class="wp-image-1118" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/10/kenpic3b-683x1024.jpg" alt="" class="wp-image-1117" width="171" height="256" /></div>



<p>Ken and I discuss open-endedness, the pursuit of ambitious goals by seeking novelty and interesting products instead of advancing directly toward defined objectives. We talk about evolution as a prime example of an open-ended system that has produced astounding organisms, Ken relates how open-endedness could help advance artificial intelligence and neuroscience, and we discuss a range of topics related to the general concept of open-endedness, and Ken takes a couple questions from <a href="https://braininspired.co/podcast/62/" target="_blank" rel="noreferrer noopener">Stefan Leijnen</a> and <a href="https://braininspired.co/podcast/22/" target="_blank" rel="noreferrer noopener">Melanie Mitchell</a>.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/10/wgcbp.jpg" alt="" class="wp-image-1116" width="167" height="250" /></div>



<p>Related:</p>



<ul><li><a href="https://www.cs.ucf.edu/~kstanley/" target="_blank" rel="noreferrer noopener">Ken’s website</a>.</li><li>Twitter: <a href="https://twitter.com/kenneth0stanley" target="_blank" rel="noreferrer noopener">@kenneth0stanley</a>.</li><li>The book:<ul><li><a href="https://www.amazon.com/gp/product/3319155237/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=3319155237&amp;linkId=60c086916468b2942d850535d267e5bf" target="_blank" rel="noreferrer noopener">Why Greatness Cannot Be Planned: The Myth of the Objective</a> by Kenneth Stanley and Joel Lehman.</li></ul></li><li>Papers:<ul><li><a href="http://nn.cs.utexas.edu/keyword?stanley:ec02">Evolving Neural Networks Through Augmenting Topologies (2002)</a></li><li><a href="http://eplex.cs.ucf.edu/papers/brant_gecco17.pdf">Minimal Criterion Coevolution: A New Approach to Open-Ended Search</a></li></ul></li></ul>



<p>Some key take-aways:</p>



<ul><li>Many of the best inventions were not the result of trying to achieve a specific objective.</li><li>Open-endedness is the pursuit of ambitious advances without a clearly defined objective.</li><li>Evolution is a quintessential example of an open-ended process: it produces a vast array of complex beings by searching the space of possible organisms, constrained by the environment, survival, and reproduction.</li><li>Perhaps the key to developing artificial general intelligence is by following an open-ended path rather than pursuing objectives (solving the same old benchmark tasks, etc.).</li></ul>



<p>0:00 – Intro<br />3:46 – Skip Intro<br />4:30 – Evolution as an Open-ended process<br />8:25 – Why Greatness Cannot Be Planned<br />20:46 – Open-endedness in AI<br />29:35 – Constraints vs. objectives<br />36:26 – The adjacent possible<br />41:22 – Serendipity<br />44:33 – Stefan Leijnen question<br />53:11 – Melanie Mitchell question<br />1:00:32 – Efficiency<br />1:02:13 – Gentle Earth<br />1:05:25 – Learning vs. evolution<br />1:10:53 – AGI<br />1:14:06 – Neuroscience, AI, and open-endedness<br />1:26:06 – Open AI</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Ken and I discuss open-endedness, the pursuit of ambitious goals by seeking novelty and interesting products instead of advancing directly toward defined objectives. We talk about evolution as a prime example of an open-ended system that has produced astounding organisms, Ken relates how open-endedness could help advance artificial intelligence and neuroscience, and we discuss a range of topics related to the general concept of open-endedness, and Ken takes a couple questions from Stefan Leijnen and Melanie Mitchell.







Related:



Ken’s website.Twitter: @kenneth0stanley.The book:Why Greatness Cannot Be Planned: The Myth of the Objective by Kenneth Stanley and Joel Lehman.Papers:Evolving Neural Networks Through Augmenting Topologies (2002)Minimal Criterion Coevolution: A New Approach to Open-Ended Search



Some key take-aways:



Many of the best inventions were not the result of trying to achieve a specific objective.Open-endedness is the pursuit of ambitious advances without a clearly defined objective.Evolution is a quintessential example of an open-ended process: it produces a vast array of complex beings by searching the space of possible organisms, constrained by the environment, survival, and reproduction.Perhaps the key to developing artificial general intelligence is by following an open-ended path rather than pursuing objectives (solving the same old benchmark tasks, etc.).



0:00 – Intro3:46 – Skip Intro4:30 – Evolution as an Open-ended process8:25 – Why Greatness Cannot Be Planned20:46 – Open-endedness in AI29:35 – Constraints vs. objectives36:26 – The adjacent possible41:22 – Serendipity44:33 – Stefan Leijnen question53:11 – Melanie Mitchell question1:00:32 – Efficiency1:02:13 – Gentle Earth1:05:25 – Learning vs. evolution1:10:53 – AGI1:14:06 – Neuroscience, AI, and open-endedness1:26:06 – Open AI
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 086 Ken Stanley: Open-Endedness]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/10/art-stanley-01-1.jpg" alt="" class="wp-image-1118" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/10/kenpic3b-683x1024.jpg" alt="" class="wp-image-1117" width="171" height="256" /></div>



<p>Ken and I discuss open-endedness, the pursuit of ambitious goals by seeking novelty and interesting products instead of advancing directly toward defined objectives. We talk about evolution as a prime example of an open-ended system that has produced astounding organisms, Ken relates how open-endedness could help advance artificial intelligence and neuroscience, and we discuss a range of topics related to the general concept of open-endedness, and Ken takes a couple questions from <a href="https://braininspired.co/podcast/62/" target="_blank" rel="noreferrer noopener">Stefan Leijnen</a> and <a href="https://braininspired.co/podcast/22/" target="_blank" rel="noreferrer noopener">Melanie Mitchell</a>.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/10/wgcbp.jpg" alt="" class="wp-image-1116" width="167" height="250" /></div>



<p>Related:</p>



<ul><li><a href="https://www.cs.ucf.edu/~kstanley/" target="_blank" rel="noreferrer noopener">Ken’s website</a>.</li><li>Twitter: <a href="https://twitter.com/kenneth0stanley" target="_blank" rel="noreferrer noopener">@kenneth0stanley</a>.</li><li>The book:<ul><li><a href="https://www.amazon.com/gp/product/3319155237/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=3319155237&amp;linkId=60c086916468b2942d850535d267e5bf" target="_blank" rel="noreferrer noopener">Why Greatness Cannot Be Planned: The Myth of the Objective</a> by Kenneth Stanley and Joel Lehman.</li></ul></li><li>Papers:<ul><li><a href="http://nn.cs.utexas.edu/keyword?stanley:ec02">Evolving Neural Networks Through Augmenting Topologies (2002)</a></li><li><a href="http://eplex.cs.ucf.edu/papers/brant_gecco17.pdf">Minimal Criterion Coevolution: A New Approach to Open-Ended Search</a></li></ul></li></ul>



<p>Some key take-aways:</p>



<ul><li>Many of the best inventions were not the result of trying to achieve a specific objective.</li><li>Open-endedness is the pursuit of ambitious advances without a clearly defined objective.</li><li>Evolution is a quintessential example of an open-ended process: it produces a vast array of complex beings by searching the space of possible organisms, constrained by the environment, survival, and reproduction.</li><li>Perhaps the key to developing artificial general intelligence is by following an open-ended path rather than pursuing objectives (solving the same old benchmark tasks, etc.).</li></ul>



<p>0:00 – Intro<br />3:46 – Skip Intro<br />4:30 – Evolution as an Open-ended process<br />8:25 – Why Greatness Cannot Be Planned<br />20:46 – Open-endedness in AI<br />29:35 – Constraints vs. objectives<br />36:26 – The adjacent possible<br />41:22 – Serendipity<br />44:33 – Stefan Leijnen question<br />53:11 – Melanie Mitchell question<br />1:00:32 – Efficiency<br />1:02:13 – Gentle Earth<br />1:05:25 – Learning vs. evolution<br />1:10:53 – AGI<br />1:14:06 – Neuroscience, AI, and open-endedness<br />1:26:06 – Open AI</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/086-Ken-Stanley.mp3" length="92183825"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Ken and I discuss open-endedness, the pursuit of ambitious goals by seeking novelty and interesting products instead of advancing directly toward defined objectives. We talk about evolution as a prime example of an open-ended system that has produced astounding organisms, Ken relates how open-endedness could help advance artificial intelligence and neuroscience, and we discuss a range of topics related to the general concept of open-endedness, and Ken takes a couple questions from Stefan Leijnen and Melanie Mitchell.







Related:



Ken’s website.Twitter: @kenneth0stanley.The book:Why Greatness Cannot Be Planned: The Myth of the Objective by Kenneth Stanley and Joel Lehman.Papers:Evolving Neural Networks Through Augmenting Topologies (2002)Minimal Criterion Coevolution: A New Approach to Open-Ended Search



Some key take-aways:



Many of the best inventions were not the result of trying to achieve a specific objective.Open-endedness is the pursuit of ambitious advances without a clearly defined objective.Evolution is a quintessential example of an open-ended process: it produces a vast array of complex beings by searching the space of possible organisms, constrained by the environment, survival, and reproduction.Perhaps the key to developing artificial general intelligence is by following an open-ended path rather than pursuing objectives (solving the same old benchmark tasks, etc.).



0:00 – Intro3:46 – Skip Intro4:30 – Evolution as an Open-ended process8:25 – Why Greatness Cannot Be Planned20:46 – Open-endedness in AI29:35 – Constraints vs. objectives36:26 – The adjacent possible41:22 – Serendipity44:33 – Stefan Leijnen question53:11 – Melanie Mitchell question1:00:32 – Efficiency1:02:13 – Gentle Earth1:05:25 – Learning vs. evolution1:10:53 – AGI1:14:06 – Neuroscience, AI, and open-endedness1:26:06 – Open AI
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-stanley-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:35:43</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 085 Ida Momennejad: Learning Representations]]>
                </title>
                <pubDate>Wed, 30 Sep 2020 14:20:54 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-085-ida-momennejad-learning-representations</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-085-ida-momennejad-learning-representations</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/09/art-mommenejad-01.jpg" alt="" class="wp-image-1111" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/5p609bJh.jpg" alt="" class="wp-image-1112" width="245" height="245" /></div>



<p>Ida and I discuss the current landscape of reinforcement learning in both natural and artificial intelligence, and how the old story of two RL systems in brains - model-free and model-based - is giving way to a more nuanced story of these two systems constantly interacting and additional RL strategies between model-free and model-based to drive the vast repertoire of our habits and goal-directed behaviors. We discuss Ida’s work on one of those “in-between” strategies, the successor representation RL strategy, which maps onto brain activity and accounts for behavior. We also discuss her interesting background and how it affects her outlook and research pursuit, and the role philosophy has played and continues to play in her thought processes.</p>



<p>Related links:</p>



<ul><li><a href="https://www.momen-nejad.org/" target="_blank" rel="noreferrer noopener">Ida’s website</a>.</li><li>Twitter: <a href="https://twitter.com/criticalneuro" target="_blank" rel="noreferrer noopener">@criticalneuro</a>.</li><li>A nice review of what we discuss:<ul><li><a href="https://c92f2aaf-c608-4b3b-9515-11f6b86527f2.filesusr.com/ugd/a6d7e4_850af79063784e60949ba8529006dee5.pdf" target="_blank" rel="noreferrer noopener">Learning Structures: Predictive Representations, Replay, and Generalization.</a></li></ul></li></ul>



<p>Time stamps:</p>



<p>0:00 - Intro<br />4:50 - Skip intro<br />9:58 - Core way of thinking <br />19:58 - Disillusionment<br />27:22 - Role of philosophy<br />34:51 - Optimal individual learning strategy<br />39:28 - Microsoft job<br />44:48 - Field of reinforcement learning<br />51:18 - Learning vs. innate priors<br />59:47 - Incorporating other cognition into RL<br />1:08:24 - Evolution<br />1:12:46 - Model-free and model-based RL<br />1:19:02 - Successor representation<br />1:26:48 - Are we running all algorithms all the time?<br />1:28:38 - Heuristics and intuition<br />1:33:48 - Levels of analysis<br />1:37:28 - Consciousness</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Ida and I discuss the current landscape of reinforcement learning in both natural and artificial intelligence, and how the old story of two RL systems in brains - model-free and model-based - is giving way to a more nuanced story of these two systems constantly interacting and additional RL strategies between model-free and model-based to drive the vast repertoire of our habits and goal-directed behaviors. We discuss Ida’s work on one of those “in-between” strategies, the successor representation RL strategy, which maps onto brain activity and accounts for behavior. We also discuss her interesting background and how it affects her outlook and research pursuit, and the role philosophy has played and continues to play in her thought processes.



Related links:



Ida’s website.Twitter: @criticalneuro.A nice review of what we discuss:Learning Structures: Predictive Representations, Replay, and Generalization.



Time stamps:



0:00 - Intro4:50 - Skip intro9:58 - Core way of thinking 19:58 - Disillusionment27:22 - Role of philosophy34:51 - Optimal individual learning strategy39:28 - Microsoft job44:48 - Field of reinforcement learning51:18 - Learning vs. innate priors59:47 - Incorporating other cognition into RL1:08:24 - Evolution1:12:46 - Model-free and model-based RL1:19:02 - Successor representation1:26:48 - Are we running all algorithms all the time?1:28:38 - Heuristics and intuition1:33:48 - Levels of analysis1:37:28 - Consciousness
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 085 Ida Momennejad: Learning Representations]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/09/art-mommenejad-01.jpg" alt="" class="wp-image-1111" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/5p609bJh.jpg" alt="" class="wp-image-1112" width="245" height="245" /></div>



<p>Ida and I discuss the current landscape of reinforcement learning in both natural and artificial intelligence, and how the old story of two RL systems in brains - model-free and model-based - is giving way to a more nuanced story of these two systems constantly interacting and additional RL strategies between model-free and model-based to drive the vast repertoire of our habits and goal-directed behaviors. We discuss Ida’s work on one of those “in-between” strategies, the successor representation RL strategy, which maps onto brain activity and accounts for behavior. We also discuss her interesting background and how it affects her outlook and research pursuit, and the role philosophy has played and continues to play in her thought processes.</p>



<p>Related links:</p>



<ul><li><a href="https://www.momen-nejad.org/" target="_blank" rel="noreferrer noopener">Ida’s website</a>.</li><li>Twitter: <a href="https://twitter.com/criticalneuro" target="_blank" rel="noreferrer noopener">@criticalneuro</a>.</li><li>A nice review of what we discuss:<ul><li><a href="https://c92f2aaf-c608-4b3b-9515-11f6b86527f2.filesusr.com/ugd/a6d7e4_850af79063784e60949ba8529006dee5.pdf" target="_blank" rel="noreferrer noopener">Learning Structures: Predictive Representations, Replay, and Generalization.</a></li></ul></li></ul>



<p>Time stamps:</p>



<p>0:00 - Intro<br />4:50 - Skip intro<br />9:58 - Core way of thinking <br />19:58 - Disillusionment<br />27:22 - Role of philosophy<br />34:51 - Optimal individual learning strategy<br />39:28 - Microsoft job<br />44:48 - Field of reinforcement learning<br />51:18 - Learning vs. innate priors<br />59:47 - Incorporating other cognition into RL<br />1:08:24 - Evolution<br />1:12:46 - Model-free and model-based RL<br />1:19:02 - Successor representation<br />1:26:48 - Are we running all algorithms all the time?<br />1:28:38 - Heuristics and intuition<br />1:33:48 - Levels of analysis<br />1:37:28 - Consciousness</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/085-Ida-Mommenejad-public.mp3" length="99845662"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Ida and I discuss the current landscape of reinforcement learning in both natural and artificial intelligence, and how the old story of two RL systems in brains - model-free and model-based - is giving way to a more nuanced story of these two systems constantly interacting and additional RL strategies between model-free and model-based to drive the vast repertoire of our habits and goal-directed behaviors. We discuss Ida’s work on one of those “in-between” strategies, the successor representation RL strategy, which maps onto brain activity and accounts for behavior. We also discuss her interesting background and how it affects her outlook and research pursuit, and the role philosophy has played and continues to play in her thought processes.



Related links:



Ida’s website.Twitter: @criticalneuro.A nice review of what we discuss:Learning Structures: Predictive Representations, Replay, and Generalization.



Time stamps:



0:00 - Intro4:50 - Skip intro9:58 - Core way of thinking 19:58 - Disillusionment27:22 - Role of philosophy34:51 - Optimal individual learning strategy39:28 - Microsoft job44:48 - Field of reinforcement learning51:18 - Learning vs. innate priors59:47 - Incorporating other cognition into RL1:08:24 - Evolution1:12:46 - Model-free and model-based RL1:19:02 - Successor representation1:26:48 - Are we running all algorithms all the time?1:28:38 - Heuristics and intuition1:33:48 - Levels of analysis1:37:28 - Consciousness
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-mommenejad-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:43:41</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 084 György Buzsáki and David Poeppel]]>
                </title>
                <pubDate>Tue, 15 Sep 2020 10:22:29 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-084-gyorgy-buzsaki-and-david-poeppel</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-084-gyorgy-buzsaki-and-david-poeppel</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/09/art-buzsaki-poeppel-01.jpg" alt="" class="wp-image-1106" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/GyruiPortrait.jpg" alt="" class="wp-image-1107" width="200" height="273" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/david-mug-current.png" alt="" class="wp-image-1108" width="220" /></div>



<p>David, Gyuri, and I discuss the issues they argue for in their back and forth commentaries about the importance of neuroscience and psychology, or implementation-level and computational-level, to advance our understanding of brains and minds – and the names we give to the things we study. Gyuri believes it’s time we use what we know and discover about brain mechanisms to better describe the psychological concepts we refer to as explanations for minds; David believes the psychological concepts are constantly being refined and are just as valid as objects of study to understand minds. They both agree these are important and enjoyable topics to debate.<br /><br />Also, special guest questions from <a href="https://braininspired.co/podcast/66/">Paul Cisek</a> and <a href="https://braininspired.co/podcast/77/">John Krakauer</a>.</p>



<p>Related:</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0190905387/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190905387&amp;linkId=0732d2e75ef056e2d42950e794a3b3aa" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/01/41PW0EJV7TL._SX351_BO1204203200_.jpg" alt="" class="wp-image-943" width="177" height="250" /></a></div>



<ul><li><a href="https://buzsakilab.com/wp/">Buzsáki lab</a>; <a href="http://psych.nyu.edu/clash/poeppellab/" target="_blank" rel="noreferrer noopener">Poeppel lab</a></li><li>Twitter: <a href="https://twitter.com/davidpoeppel">@davidpoeppel</a>.</li><li>The papers we discuss or mention:<ul><li><a href="https://www.eneuro.org/content/7/4/ENEURO.0314-20.2020">Calling Names</a> by Christophe Bernard</li><li><a href="https://www.eneuro.org/content/7/4/ENEURO.0069-20.2020">The Brain–Cognitive Behavior Problem: A Retrospective</a> by György Buzsáki.</li><li><a href="https://doi.org/10.1523/ENEURO.0215-20.2020">Against the Epistemological Primacy of the Hardware: The Brain from Inside Out, Turned Upside Down</a> by David Poeppel.</li></ul></li><li>Books:<ul><li><a href="https://www.amazon.com/gp/product/0190905387/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190905387&amp;linkId=0732d2e75ef056e2d42950e794a3b3aa">The Brain from Inside </a><a href="https://www.amazon.com/gp/product/0190905387/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190905387&amp;linkId=0732d2e75ef056e2d42950e794a3b3aa" target="_blank" rel="noreferrer noopener">Out</a> by György Buzsáki.</li><li><a href="https://www.amazon.com/gp/product/0262043254/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262043254&amp;linkId=f0ebddba029c245869039266a732778d">The Cognitive Neurosciences</a> (edited by David Poeppel et al).</li></ul></li></ul>



<p>Timeline:</p>



<p>0:00 – Intro<br />5:31 – Skip intro<br />8:42 – Gyuri and David summaries<br />25:45 – Guest questions<br />36:25 – Gyuri new language<br />49:41 – Language and oscillations<br />53:52 – Do we know what cognitive functions we’re looking for?<br />58:25 – Psychiatry<br />1:00:25 – Steve Grossberg approach<br />1:02:12 – Neuroethology<br />1:09:08 – A...</p>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
















David, Gyuri, and I discuss the issues they argue for in their back and forth commentaries about the importance of neuroscience and psychology, or implementation-level and computational-level, to advance our understanding of brains and minds – and the names we give to the things we study. Gyuri believes it’s time we use what we know and discover about brain mechanisms to better describe the psychological concepts we refer to as explanations for minds; David believes the psychological concepts are constantly being refined and are just as valid as objects of study to understand minds. They both agree these are important and enjoyable topics to debate.Also, special guest questions from Paul Cisek and John Krakauer.



Related:







Buzsáki lab; Poeppel labTwitter: @davidpoeppel.The papers we discuss or mention:Calling Names by Christophe BernardThe Brain–Cognitive Behavior Problem: A Retrospective by György Buzsáki.Against the Epistemological Primacy of the Hardware: The Brain from Inside Out, Turned Upside Down by David Poeppel.Books:The Brain from Inside Out by György Buzsáki.The Cognitive Neurosciences (edited by David Poeppel et al).



Timeline:



0:00 – Intro5:31 – Skip intro8:42 – Gyuri and David summaries25:45 – Guest questions36:25 – Gyuri new language49:41 – Language and oscillations53:52 – Do we know what cognitive functions we’re looking for?58:25 – Psychiatry1:00:25 – Steve Grossberg approach1:02:12 – Neuroethology1:09:08 – A...]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 084 György Buzsáki and David Poeppel]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/09/art-buzsaki-poeppel-01.jpg" alt="" class="wp-image-1106" />



<div class="wp-block-image"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/GyruiPortrait.jpg" alt="" class="wp-image-1107" width="200" height="273" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/david-mug-current.png" alt="" class="wp-image-1108" width="220" /></div>



<p>David, Gyuri, and I discuss the issues they argue for in their back and forth commentaries about the importance of neuroscience and psychology, or implementation-level and computational-level, to advance our understanding of brains and minds – and the names we give to the things we study. Gyuri believes it’s time we use what we know and discover about brain mechanisms to better describe the psychological concepts we refer to as explanations for minds; David believes the psychological concepts are constantly being refined and are just as valid as objects of study to understand minds. They both agree these are important and enjoyable topics to debate.<br /><br />Also, special guest questions from <a href="https://braininspired.co/podcast/66/">Paul Cisek</a> and <a href="https://braininspired.co/podcast/77/">John Krakauer</a>.</p>



<p>Related:</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0190905387/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190905387&amp;linkId=0732d2e75ef056e2d42950e794a3b3aa" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/01/41PW0EJV7TL._SX351_BO1204203200_.jpg" alt="" class="wp-image-943" width="177" height="250" /></a></div>



<ul><li><a href="https://buzsakilab.com/wp/">Buzsáki lab</a>; <a href="http://psych.nyu.edu/clash/poeppellab/" target="_blank" rel="noreferrer noopener">Poeppel lab</a></li><li>Twitter: <a href="https://twitter.com/davidpoeppel">@davidpoeppel</a>.</li><li>The papers we discuss or mention:<ul><li><a href="https://www.eneuro.org/content/7/4/ENEURO.0314-20.2020">Calling Names</a> by Christophe Bernard</li><li><a href="https://www.eneuro.org/content/7/4/ENEURO.0069-20.2020">The Brain–Cognitive Behavior Problem: A Retrospective</a> by György Buzsáki.</li><li><a href="https://doi.org/10.1523/ENEURO.0215-20.2020">Against the Epistemological Primacy of the Hardware: The Brain from Inside Out, Turned Upside Down</a> by David Poeppel.</li></ul></li><li>Books:<ul><li><a href="https://www.amazon.com/gp/product/0190905387/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190905387&amp;linkId=0732d2e75ef056e2d42950e794a3b3aa">The Brain from Inside </a><a href="https://www.amazon.com/gp/product/0190905387/ref=as_li_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;camp=1789&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190905387&amp;linkId=0732d2e75ef056e2d42950e794a3b3aa" target="_blank" rel="noreferrer noopener">Out</a> by György Buzsáki.</li><li><a href="https://www.amazon.com/gp/product/0262043254/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262043254&amp;linkId=f0ebddba029c245869039266a732778d">The Cognitive Neurosciences</a> (edited by David Poeppel et al).</li></ul></li></ul>



<p>Timeline:</p>



<p>0:00 – Intro<br />5:31 – Skip intro<br />8:42 – Gyuri and David summaries<br />25:45 – Guest questions<br />36:25 – Gyuri new language<br />49:41 – Language and oscillations<br />53:52 – Do we know what cognitive functions we’re looking for?<br />58:25 – Psychiatry<br />1:00:25 – Steve Grossberg approach<br />1:02:12 – Neuroethology<br />1:09:08 – AI as tabula rasa<br />1:17:40 – What’s at stake?<br />1:36:20 – Will the space between neuroscience and psychology disappear?</p>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>Gyuri    00:00:01    Every science in the history of physics, chemistry, genetics, computer science, they came up with their own defined vocabulary neuroscience hasn’t Euroscience is just take it for granted that we are already given a vocabulary. And V we have a roadmap. All we have to do is take those words and, and identify them. The mechanism.  </p>



<p>David    00:00:23    I’m a naive realist. I believe the world is as it is, and we can figure it out. And I would like to use the best put our best foot forward. Right. And so I think the, uh, slightly too casual dismissal of the contribution of the psychological sciences comes at a cost that I don’t want to incur.  </p>



<p>Gyuri    00:00:42    David, let me push you in a corner now. Okay, please. He’s a quote from you that takes to be explained a human cognitive faculties. So here are my questions.  </p>



<p>David    00:00:56    It’s important. I think that people realize that we can be friends, colleagues, and actually appreciate the work, and just try to understand these very difficult problems and debate. There’s just nothing wrong with that. That hopefully makes better science makes her arguments deeper. And these were we’re working on extremely difficult problems.  </p>



<p>Speaker 3    00:01:21    This is brain inspired.  </p>



<p>Paul    00:01:34    What’s more important for understanding brain and mental functions, psychology or neuroscience. That’s the super short version of the question for today’s episode, which is about the balance between the functional level terms and descriptions of psychology and the mechanism level terms and descriptions of neuroscience and how we should proceed to explain and understand not only the relation between brains and minds, but how to conceptualize what it is. We’re even trying to explain and understand, uh, Hey everyone, I’m Paul, uh, welcome to the show and we’re really grateful to bring you today. Your abuser, Jackie and David  to discuss this. So URI and David are both neuroscientists at NYU. David’s been on the podcast before, back on episode 46. And the reason they’re both on is because of a recent exchange of commentaries they had in the journal. e-Neuro about these topics. URI has recently written the book, the brain from inside out, which summarizes years and years of his thoughts and, uh, much neuroscience work from his own lab and from other labs, uh, it is a fantastic book and we refer to it a lot throughout the conversation.  </p>



<p>Paul    00:02:51    So you’ll learn more about it, but the exchange, he and David had recently stems from URI deciding to try to publish that he wrote 20 years ago, which got rejected 20 years ago. But that 20 year old manuscript contains the seeds and some core ideas that later developed into his book, the brain from inside out. So Yuri resubmitted that original commentary manuscript, and you’ll hear him tell this story, but the end product is a series of commentaries, uh, between the editor, Christophe Bernard and URI and David. So the short version is that URI believes we’ve suffered too long at the hands of psychologists who tend to give a name to something that they deem a mental function like attention, and then leave it up to neuroscientists to then find a tension in the brain. Uh, he thinks rather that we should use what we observe about brains to better develop the concepts and terms in psychology, David, uh, who, by the way, recently released the latest edition of the definitive cognitive neurosciences textbook.  </p>



<p>Paul    00:03:58    Uh, David believes it’s a mistake to give brain mechanisms primacy over mental functions when it comes to understanding the emergent properties of brain function and our minds that rather they should be given equal footing and that neuroscientists, whether they admit it or not are always operating under some assumptions about the mental functions, their brain experiments refer to. All right. So those are just the starting points. We cover a lot of terrain from there. And for fun, I asked two previous guests on the show, Paul ChIP-seq and Jon Krakauer to each record a question for your and David. So I play those questions and, uh, URI and David, uh, respond. And there’s even a guest appearance by a little family of black bears for a little intermission. All right. So I link to all of the many things I just discussed, uh, you know, include including the commentaries and URIs book and David’s book, uh, in the show notes at brain inspired.co/podcast/ 84, man, if you value this podcast and you want to support it and hear the full versions of all the episodes and occasional separate bonus episodes, you can do that for next to nothing through Patrion, go to brain inspired.co and click the red Patrion button there.  </p>



<p>Paul    00:05:15    All right. Again, I just feel very grateful to bring you these wonderful minds, and I feel lucky for being able to host them. And I hope you feel lucky for getting to listen to them, enjoy your, a, your recent book, the brain from inside out, um, which I love. And I also loved, uh, rhythms of the brain. They’re both written really, really well. Um, and it’s just something for authors to aspire to. One of the wonderful features, uh, in both those books are the summaries that you give at the end, the little chapter summaries. I they’re so good when I was in graduate school. After I read rhythms of the brain, uh, I took the book which the library had to the copier, which also had a scanner. And at the time this seemed pretty fancy to me and I scanned in all of the summaries and I still have it on my computer.  </p>



<p>Paul    00:06:07    So it’s, it’s made it through a few different computers now. And just recently, I just pulled it out to actually to make sure I still had it. Um, so nice job on both books, but the brain from inside out is, is also filled with a bunch of quotes. So I’m going to start with, um, what I think is a relevant quote for today and see if you guys can guess who it’s from. Quote, I am convinced that an important stage of human thought will have been reached when the physiological and the psychological, the objective and the subjective are actually United when the tormenting conflicts or contradictions between my consciousness and my body will have been factually resolved or discarded. Can you guys guess who that is?  </p>



<p>David    00:06:52    No, but I would have said Descartes  </p>



<p>Paul    00:06:54    Descartes, pretty good guess. Yuri, any guess?  </p>



<p>Gyuri    00:06:57    Somebody like that. Cod could say that,  </p>



<p>Paul    00:06:59    Well, Pavlov would be, I think you would take that as a compliment. She was cast as Dick Hart.  </p>



<p>Gyuri    00:07:05    Yeah. There is nothing new under the sun. There’s nothing new under the sun.  </p>



<p>Paul    00:07:11    That’s right. It’s an old, yeah, it really it’s it’s and Descartes is, it could very well have been Descartes, so people can be very opinionated about this topic. So I’m confident today that we will, uh, resolve it. I’m sure. Okay. Here’s another quote. Before we really get the ball rolling. Quote, I analyzed how an undefined and an agreed upon terminology, which we inherited from our pre neuroscience ancestors and never questioned, has it become a roadblock to progress Yuri and he guesses Decart perhaps  </p>



<p>David    00:07:47    That’s URI. There  </p>



<p>Paul    00:07:49    You go. Yeah. That’s URI. That’s a tweet.  </p>



<p>David    00:07:52    It’s a tweet that you already wrote for sure.  </p>



<p>Gyuri    00:07:55    Yeah. But, you know, I can, I can come up with several people. Of course, who, again, the thoughts mature very slowly and they get embedded into the right environment. And, uh, that, that process is very slow.  </p>



<p>Paul    00:08:11    Yeah. Well, okay guys, uh, I’m going to ask you both to summarize, you know, two minutes, you know, whatever it really takes, not, not more than 45 minutes, kind of just briefly summarize your positions, your perspectives on these issues. Uh, and then, and then we’ll, we’ll move on from there. And your, uh, let’s just start with you because, uh, the way that the articles worked is you, you, you posted your 20 year old article, which David then got to respond to. And then, you know, after you guys summarize, then we’ll come back and I’ll let Yuri also respond to David’s response. So you’re a ticket away.  </p>



<p>Gyuri    00:08:46    Well, historically research on the brain has been working its way in from the outside world, hoping that such systematic exploration will take us to the middle and on through the middle to the output. Ever since the time of Aristotle, scholars and scientists assumed that the brain or more precisely back then the mind is initially a blank slate, filled in by your lived experience in an outside in manner. And the alternative a brain centric view. The one I’m proposing is that self-organized brain networks induce a vast repertoire of preformed neuronal patterns while interacting with the world. Some of these initially nonsensical patterns acquire behavioral significance, or you can call them meaning that’s. The experience is a process of matching pre-existing neuro dynamics to events of the world. The perpetually active internal dynamic is the source of cognition and your operation disengaged from images census accordingly.  </p>



<p>Gyuri    00:09:46    In my book that you kind of mentioned the brain from inside out, I discuss three major topics. First, how we got to our present presently dominant framework in neuroscience. The second part argues in favor of the idea that all source of knowledge, including our perceptions, memories and plans, arises from actions in the third part are you would say the advantages of a self-organized pre-configured brain and presented as an alternative to the blank slate framework. Now, thinking about part of the book began 20 years ago, when I wrote that review the manifesto, if you want about the origin of our scientific terminology, which is the main topic of the book’s introduction in this old review, I summarize how neuroscientists began to study the brain, buying into a system created by philosophers and psychologists for understanding the soul and the mind without ever asking how those terms, whose brain functioning neuroscientists are trying to understand such as consciousness attention.  </p>



<p>Gyuri    00:10:44    And so it brought into our thinking in the first place I argued that this outside in framework may not be the best strategy to understand the brain. Although back then, I didn’t have enough ammunition to offer an alternative. The arguments laid down in that unpublished manuscript have become more popular over the years. So to test the waters, I resubmitted the paper and got full good reviews. Now publish this text with its 20 year old references and thinking is a good target for contemporary cognitive scientists like David Poeppel, who can repeat the path and offer better substitutes.  </p>



<p>Paul    00:11:23    All right. That’s good. Good introduction. David, do you want to just take  </p>



<p>David    00:11:28    Sure. Let me get, so there is a few things. So for context, I want to start with, um, first of all, uh, like you said, Paul come for me as well. Your, his book rhythms of the brain was absolutely foundational and super important and required reading in my lab. Um, it’s, uh, absolutely, you know, kind of masterpiece. And I have to say very kindly, I once saw URI, uh, at a lecture I gave, I think it was a Rutgers. He signed the book. I had the book with me and he signed it saying all intelligent people think about timing, which thanks for that URI. And then of course I with, you know, voraciously read his recent book. Now there’s a kind of larger points I want to start with before we debate all kinds of nitty gritty argumentation. That is, I want to make clear to listeners in particular students and postdocs or trainees, how useful it is to be able to have debates even among friends and colleagues.  </p>



<p>David    00:12:28    So a few years ago I was teaching a class at cold spring Harbor. And in that context, I got into a really intensive debate with one of my very close friends and we really had it out for an hour or so. And the students came back and felt uncomfortable, but this level of back and forth, which that sounds really disturbing because it has nothing to do with our regard for each other, with our respect for each other’s work. And so on, we just, we were disagreeing on some points and we really went after each other. And this, the training is didn’t like that. And I found that not good actually. And it really, I found it to be willed during and disturbing. And one of the things I really appreciate about being able to sort of bounce back and forth ideas with Yuri and having a discussion today is that it’s important.  </p>



<p>David    00:13:15    I think that people realize that we can be friends, colleagues and actually appreciate the work and just try to understand these very difficult problems in debate. There’s just nothing wrong with that. That hopefully makes better science makes her arguments deeper. And these were we’re working on extremely difficult problems. And so for don’t begrudge us that we, we can like each other and still debate points because we’re trying to move the thing forward. And I found that a really odd reaction a few years ago, and it really kind of moved me. So I’m very happy that we can do something about the topics.  </p>



<p>Gyuri    00:13:48    So I had exactly the same memory and experience as your students many years ago, when my mentor entered Russia and invented both Fremont, our Volta, Fremont to give a talk in Hungary and then  attacked him. And it was strange to me, you know, why he invites a person from far away lands to attack you and that lunch Walter Freeman said, oh, I never had such a good time for a long time,  </p>



<p>Paul    00:14:19    But it made you uncomfortable Yuri at the time, or,  </p>



<p>Gyuri    00:14:21    Well, it was yeah, just like David students. But I think David students also realize in time that, you know, discussion is the way how to move forward, don’t play out your cards. How do you know what’s in the cards?  </p>



<p>Paul    00:14:36    Well, I was going to ask David before you told that story, whether he thought that it was a generational thing or whether it was a developmental aspect and that happens every generation, is it snowflakes or is it a normal developmental? I mean, I,  </p>



<p>David    00:14:52    I must say, I don’t know. So the, the paper that I wrote, just in reaction to your research paper that, you know, Christophe Bernard, kindly invited us to comment on is a, is a graduate student. So here’s a young, you know, Federico Adelphi is a young scientist who works on computational topics and psychophysics he’s just has a big appetite for these topics and knowledge and likes to read. So it can’t be purely generational. I do remember in graduate school, growing up in a context where debate was a vigorous, maybe sometimes too vigorous and valued. And, um, I think that’s not always the case so much. And I think we’re, we at our, it’s a risk that we take to be too. I mean, we, we, these are not things that are ad hominem. We can say, I just think you’re wrong about this argument. And we try to move the needle. And I think people should accept that as part of a positive part of the scientific process actually,  </p>



<p>Paul    00:15:39    Is that, is that why you, so in your article and I’m sorry, I started digression, which is totally fine, cause that’s what we should do, but is that why you are so complimentary throughout the article of Yuri’s work just to ward off the, uh, the potential?  </p>



<p>David    00:15:54    I think it’s important to be clear because so, so this is not about to, for instance, take, take the last part of, or what I think it’s the last part of your book where there’s tons of places where of course I agree. Those are, you know, very fundamental points in particular.  </p>



<p>Paul    00:16:07    The last part of the show, David does not good for the chefs. I know  </p>



<p>David    00:16:10    We’re, don’t worry, I’ll, I’ll disagree as soon. The, so there’s a part of the book for instance, that the notion of preformed self-organized dynamics that formed the basis. I’m absolutely on board with that. I think that’s actually where, uh, I think it would be fun to work together on how to, how to move that forward. So I completely agree, but the point is one can agree on certain things on data, on how to do experiments and disagree about the foundations of how we move this forward. And that is where I disagree. And that’s sort of the things I tried to highlight in our reaction to  </p>



<p>Paul    00:16:42    Piece. Do you want to, you want to summarize what the  </p>



<p>David    00:16:45    So I’ll summarize quickly? So I mean maybe the easiest way to summarize it is that we stand in defense of the outside in view rather than the clear inside out view that URI outlands, because, um, we think that it’s happens anyway, so that the inside out that kind of stringently stridently inside out view that you’re your for in the book is not really what happens. Uh it’s you have implicit hypothesis, you’re an implicit philosopher. You make assumptions that sort of underpin what you’re trying to explain. Then you do the very detailed implementational work, and then you go back and sort of revise and try to make it very explicit. So we have a sort of slightly cheeky phrase called the implementation. The sandwich is you can’t just start with the wetware or the brain. You actually make some assumptions about what you’re trying to explain. Then you do the implementation that will work, and then you refine the kind of psychology or cognitive science, but you sandwich the implementation work. You don’t just go from a characterization of the hardware to a puta dysfunction. And that’s where I think we  </p>



<p>Paul    00:17:52    Disagree. Okay. So you’re a, um, I want to give you a chance to respond here, uh, to, you know, David’s perspective. Um, you know, first of all, is that an accurate portrayal of your view or, or, or would you say that maybe he mischaracterized, um, some of your goals, you know, is, is there a straw man that David is addressing that, that isn’t your objective?  </p>



<p>Gyuri    00:18:17    Well, first of all, I’d like to thank David and Federico for taking the time, reading by the view and parts of my book and writing a beautiful piece, expressing differences of opinion. I think this is what we expected. I’m talking about neuroscience and the brain. I think David is represented the psychology undermine. He claims that the object of neuroscience is the hardware wetware itself. This is what he has written, but in reality, no self-respecting neuroscientists would agree with such correspondence. The focus of neuroscience is how behavior and cognition are generated by the brain, including by its interaction with this niche, which is the body it supports and other brain it community, other brains. It communicates. I agree with David that it may be premature to abandon the traditional outside-in strategy in some areas of cognitive neuroscience, because currently there may be no substitute, but my argument is that the time is right for neuroscience because the outside in framework has reached an asymptote in many areas.  </p>



<p>Gyuri    00:19:21    And that process began perhaps 20 years ago. I claim that they, and the inevitable consequence of the outside in framework is viewing the brain as a blank slate. Nobody believes in it, but in practice, this idea just does not go away. AI is still can go on brain inspired algorithms, but as a result of taking the current state of neuroscience virtually all AI is based on the tabula rasa philosophy. Now, David and Federico are trying to push me in a corner of a boxing ring so they can have a vantage point to make the statements in the tradition of classic classic cognitive science. They first come up with labels, which in their view characterize my work such as epistemological primacy of the hardware. I liked that that radical implementation is I like that too. But then these terms, I explained the detail in the paper, David, I feel that these attacks actually are directed to, or towards Henry Markram’s original human brain project, which recommends to build up a brain from bottom up.  </p>



<p>Gyuri    00:20:31    But I do not belong to that camp at all. And the inside out he is very different from the button-up reductionist formulation. In fact, a good part of my book is trying to explain why such program have limitations using civil arguments that David and Federico, again, the inside out has little to do with the bottom-up implementation. Isn’t it’s suggests that the debate between the cognitive terms, which are made up by our predecessors and brain mechanisms, brain mechanisms should be the arbitrators, not the other way around. That’s all I wanted to say in response for now. All right.  </p>



<p>David    00:21:07    Can I just, since we’re just on this particular issue, because it’s an important, I think that, yeah, you’re, uh, the question is how brain mechanisms, you know, brain Mickens are the arbitrators of what, right. So how will, so in the end, our real disagreement is about the utility of the concepts from the psychological and cognitive sciences and how neural data on its own could adjudicate between alternatives that are posed that way. And I think that’s, so I’m more optimistic on the side that con cognitive science and computation, and some amalgam actually have as an equal status in terms of evidence as neuroscience. And I think that’s where we may disagree. Actually. That’s why we call it epistemological primacy or whatever the phrase was. We use to say, actually at the end, this the most serious arbitration comes from the neuro-biological data, not from the cognitive science or psychological data.  </p>



<p>Gyuri    00:22:00    Yeah. So you ask it though, uh, what, what do the brain mechanisms will refer to? Well, what they refer to is already out to our preconceived ideas, but they help us to systematize to abandon some assumptions and reinforce others. I never ever thought seriously, but I’m, uh, I, I realized that perhaps it’s easy to misunderstand what I said. I don’t suggest we go into the brain. We do a lot of things there and voila, the cognitive terms will come out. No, we go into anything into anything. Any, any thought has a background. It has a context. So we, we, we always go with something in it. And I, I say it very clearly in the last two chapters that the brain always guesses, there is nothing new in the world for the brain. Everything is, everything is, is, is familiar. And then the familiar becomes with time different.  </p>



<p>Gyuri    00:23:03    If the organism has problems with understanding that. So I understand that part of my brain centered obsession can be viewed as a, uh, uh, implementational naive view. How to, how to you go here and from elements, you build up the brain. But no, I, I, I doubt that’s possible. And I, I think this is a platform that we agreed and disagree. There are other areas we will discuss later where there is a disagreement, but I, I think this is, uh, this is perhaps my mistake to, uh, not clarified enough. And I think the main problem is that indeed, that target article probably had the flavor of this, not necessarily the book, but the article was 20 years ago. It’s not a good excuse, but  </p>



<p>Paul    00:24:05    David, you were going to respond. Do you want to briefly respond  </p>



<p>David    00:24:10    Yuri raise an important point, which is, I think where we, uh, they want to make a distinction between, um, implementation driven versus sort of radical reductionism. Those seem to me a little different. So what I want to impute to your is that he is an implementation analyst in the sense of  or something like that. Although that’s not quite care, that’s not correct about Mara. We’ll get to that later. But the, and that is the, there is really a higher status of the evidence of the implementation level that doesn’t have to be reductionist in the sense of going to, you know, send haptic mechanisms or quantum mechanics. It has some level of description that is the level of the implementation of the putative mechanism is, uh, is higher ranked in terms of what it is as evidence than some other evidence. And I don’t think, I guess evidence is just evidence, right? And there’s no sort of, uh, this is the best evidence, the second best, the third best. I just don’t see that. So I absolutely agree with URI that some of the, the more trenchant critique is re directed some of the research program that kind of Henry Markram pushed, which I also see as not even coherent, actually, it’s not even clear how that could go anywhere at all. So I have no sympathy for that and I wouldn’t take the time to write a paper about it. It’s just it’s incoherent.  </p>



<p>Paul    00:25:30    Okay. All right. So this is a great beginning here, and I’m just going to right off the bat. We’re going to get into questions from my little surprise guests here, and you guys didn’t know who these were. Um, and they didn’t know they knew who each other were, but they didn’t know what each other’s question was. Okay. So the first question is, uh, from Jon Krakauer, and this question will be directed more toward your’e, but we can, you know, you can both discuss the second. And then, um, before you actually react to that, I’m going to go in and play the second question. And that is from Paul , uh, and that will be directed more toward David. Uh, and then, and then I’ll play the two questions and then we can move on. So here, if you guys are ready, here’s John Krakauer.  </p>



<p>John Krakauer    00:26:14    Hi URI. Uh, this is John, sorry that I can’t be asking you these question in person. I very much look back fondly on our debate at Columbia several years ago. So here’s my question. Um, on page 225 of your wonderful new book, you see this about tools they had to be imagined before their maker could start working artifacts. So externalized versions of a thought, a reflection of contemplation artifacts are semantic entities. So given that you want to get rid of philosophical and psychological language, um, I challenge you to restate that sentence with only neural implementational language. So no psychological terms allowed, remove the words, imagination, reflection, thought, contemplation, and semantics, and say it all in terms of neurons and circuits, I would contend you won’t be able to do it. And please, no, you, you, you use those psychological terms to hypothesize yourself about externalization. So they’re doing work for you. Conceptually, you may answer that we need to find a new language based on implementational level details, but that’s never going to happen. In my view, you’re always going to have compressed psychological concepts to express ideas about cognition and neural data will simply be confirmatory. So my question is, do you really think that you’re going to be able to forego the very language that you yourself used throughout your book to conceptualize with?  </p>



<p>Paul    00:27:39    She is. Okay, so that’s John’s, can you hold that in mind? And we’ll play Paul’s just because they’re of a similar nature, but directed, uh, in, in different directions. Okay. So here is Paul.  </p>



<p>Paul Cisek    00:27:52    So my question is for Dr. Poeppel in principle, I agree that neuroscience needs behavior and that what we should seek is alignment between theories about low-level neural mechanisms and high-level psychological concepts. However, the need for having high level concepts does not imply that the particular high level concepts we currently have are necessarily the right ones. And here I agree with Dr. Buzsáki that many of the current concepts of mainstream cognitive neuroscience are inherited from folk psychology and from largely outdated pre-scientific ideas about the human mind. In fact, Russ Poldrack has pointed out that about 80% of psychological terms in use today were already around in the year 1800, which is long before psychology was actually a science. So despite all we’ve learned about neural mechanisms and all that time, we still use many of the same old high level conceptual categories to describe behavior. We’ve subdivided them into smaller categories, but the general taxonomy has resisted change.  </p>



<p>Paul Cisek    00:28:54    So my question is this, do you believe that the high level concepts have resisted change because they are so good? Or is it because they are the words we use to talk to each other about behavior and thus they have a kind of stranglehold on our scientific conversations. If the latter, then what can we do to break that stranglehold? Should we continue to subdivide and modify the same concepts or as I would argue. And I think Dr. Buzsáki would argue, should we instead seek a different set of high level concepts that are better informed by all that we’ve learned about neural mechanisms and real animals.  </p>



<p>Paul    00:29:29    Okay. So you’re a, you want to respond to David’s, uh, challenge John’s John’s challenge. Oh yeah. I, yeah. Sorry. I, I, I suppose I know I’ve spoken to too many them recently, so yeah, yeah. To John’s challenge.  </p>



<p>Gyuri    00:29:44    Yes, John, thank you very much for the question. My response is very similar to what I just gave to David that I like to apologize if you thought that, uh, somehow I naively think that the should have opened up the skull 2000 years ago, and they set up going to the, to the, the forum and the Agoda in Athens and discuss these things and just look something, uh, some kind of mechanism in the brain and, uh, build from, from there, from the details of physiological observations, then I agree with you. That would be a nice thing. I fully agree with you in general, as well as the beautiful paper that you have written with David and several others. That behavior is the F the F is a fundamental thing. In fact, you may remember, and David probably remembers that in the good old days, when you did the, any kind of psychological experiment, evoke responses, for example, it was mandatory to Recode the heart rate respiration, the galvanic skin reflex and so on, because there are so many hidden variables that my food, the investigator that the bold signal change is not a cognitive correlate, but just the change in the desperation.  </p>



<p>Gyuri    00:31:12    So I’m fully with you. And I agree that whenever we go into doing an experiment, we already have our preconceived ideas and this preconceived ideas are coming from other brains. That’s what, this is why I said. And David picked up the sentence that it’s impossible for to find nothing in the brain. Yes, it’s impossible to find nothing because we have already tons and tons of alternatives, which we can exclude. This is what I call the null hypothesis that we go in. We do something, and then we can read, use the realm of possibilities, become abandoned one at a time, and this is what called scientific progress. So I don’t want to be put into the, in the bottom up build, build it up from elements, a camp, because I know it is impossible to break apart a complex system and put it together from its ingredients without understanding the higher rules that keep together a complex system.  </p>



<p>Paul    00:32:13    Uh, and, and we can just jump from there. David, do you want to answer a Paul’s question?  </p>



<p>David    00:32:19    Yes. Thank you, Paul, for that extremely interesting point. And of course, uh, very much related to, uh, points that Yuri makes in his original paper. And of course in the recent book brain from inside out, and also actually a point that the editor of the special issue Christophe Bernard makes about naming things and giving them kind of medieval. Nominalism giving reifying concepts by giving them a name. I think that I am much more optimistic than you and your question and your in his book about the progress that’s a psychological and cognitive scientists can make and have made. And I think it’s very important to make a distinction between the casual everyday use of terminology that has turned out to be quite useful for, you know, talking to each other and the scientific decomposition, the standard, uh, there’s a philosophical tradition, Wilfrid Sellars. The philosopher makes this very clearly called the manifest image and the scientific image in that particular line of philosophy of science, which is, uh, we use certain terminology just in our, let’s say daily locution because it works right.  </p>



<p>David    00:33:27    So Yuri actually gives the example of the concept greed, because it’s something we understand it’s very easy and we explain behavior quite effectively to each other. It doesn’t follow from that, that a very systematic decomposition can not yield new taxonomies or new structures of these concepts. So I think we, we just, you know, like neuroscience, cognitive science or psychology are not that old. And we need to give the field a little bit of credit. I mean, I’m a pretty nasty critic generally of the, of my own field, or at least I’m on record as being pretty nasty to my own field. But I also want to defend that the cognitive sciences in particular, as they’re becoming more and more computationally, explicit have made serious progress in decomposing things that as we don’t just use a concept like imagination, or we use a concept like, well, maybe part of the imagination draws on memory mechanisms, which themselves are complicated and internal forward models and predictive coding and ensembles.  </p>



<p>David    00:34:25    And so I think we have to be, you have to give a little bit more credit to the conceptual analysis that the cognitive sciences and philosophy are on our offering right now and be optimistic because there’s really, we’re not just working with terminology that like my personal favorites from, from, um, the time of gull things like relationship to your parents was a conceptual primitive. You mean, you’re talking about phonology time. Yeah. So, so, you know, you think now retrospectively we make fun of that, but in the context of gulls, uh, you know, what was going on in the history of how minds were explained, that’s a totally reasonable concept, right? The notion that you would have a special attachment to your caregivers, that’s not crazy. Uh, those are just, that was, but now we have of course, much more richly structured and fine grain and pulled apart conceptual analysis.  </p>



<p>David    00:35:18    And I think so I’m actually optimistic. The question that you raised at the end of your question is whether an approach that’s different and maybe more endorsed by Yuri would yield a different taxonomy. That’s a totally reasonable question and an empirical question, right? So as we decompose these things more and find successful linking hypotheses to neuro-biological mechanisms, we might end up with a slightly different, uh, parts list of the mind, which would be, I think everyone is actually open to that. Doesn’t mean I shouldn’t use words like greed anymore or imagination, or will. Those are words that we use. They actually do a lot of work for us, uh, as suggests, you know, the work of the philosopher, Jerry Fodor explains very well, how simple concepts like that. They, they work for daily life, but they don’t work for a scientific analysis. And as long as we keep that separate, I think we’re on safe ground.  </p>



<p>Paul    00:36:05    So it does seem like a lot of this is, um, first of all, thanks for answering these surprise questions,  </p>



<p>David    00:36:12    Having surprise question. Very good idea. Very good. Very nice idea.  </p>



<p>Gyuri    00:36:18    Fantastic.  </p>



<p>Paul    00:36:19    Yeah, absolutely. Okay. Yeah. Um, so, uh, it just seems like so much of this is wrapped up in the language that we use URI. Um, one of the things that you state that you had, I believe you explicitly stated is that you would like to, um, develop from neuroscience, um, sort of a new language to, uh, describe the principles and patterns found in circuits and activity, dynamics of neurons. Um, first of all, is that correct? And if so, uh, how’s the new dictionary coming along.  </p>



<p>Gyuri    00:36:57    It’s a nice question. And this is what everybody asks now, how will we make progress? Well, David mentioned and will be ridiculed the goal, and it’s easy because, uh, they are a little bit smarter collectively speaking as a, as a, as a group, but he was probably smarter than any of us back then. Now I can say the same thing about totally agree.  </p>



<p>David    00:37:24    I I’m officially on record  </p>



<p>Gyuri    00:37:28    Indeed. You know, I, I, I, I think recently we discussed it with the gym. My goal is that, you know, how easy it is to ridicule somebody. And he was, he’s very strongly in defense of, uh, of, of, of friends goal. Now the same can be said about the giant of psychology, you know, William James. And there is a, my, I pick on him. It’s not because I don’t like him, but because he’s the best. And, uh, indeed his, uh, is what I call the James’ list is something that looked like the top of the top back then in 1890. But today, many of those things look a little funny. And so how do you know, how do we know that those things that we take seriously today? Let me pick on this. Decision-making a very popular one will not be ridiculed, uh, 10 years from now or 50 years from now.  </p>



<p>Gyuri    00:38:27    So how do I imagine this progress? Let me just give you two examples. The first one is that memory planning and imagination, these are so different concepts that are different chapters dedicated to understand and explain it to students in, in, in, in textbooks that are different people who are doing this research separately, people who are doing imagination, that they don’t talk or don’t necessarily have much to do with, with memory or planning. But over the past decade also starting to have sit cognitive scientist or starting with us in neuro physiologist, we realize that, you know, one thing can be called prediction. The other fund can be called pose diction, and one belongs to the past. The other one belongs to the future related to the current presence. But now when we look at it, for example, through hippocampal, fatal simulations, or people come to shop every pose when the past the present and the future just conferences into one entity, then all of a sudden, you may wonder whether these separations, uh, justifiable and then Schachter, and, and then then many others who started looking at the imaging of the brain.  </p>



<p>Gyuri    00:39:54    They were all of a sudden surprised that the same structures and I could add perhaps the same exact mechanisms that are mobilized when you are traveling back to the past are the same as when you are thinking about the future or making plans. So based on, on, on your own mechanism, I can imagine maybe premature, but this three words in fact will be pulled together just like in relativism of physics, that they are not separate. They are the same. I hope you understand what I’m trying to say. Now, there is another interesting concept that I attacked several years ago, and then there is a recent actually I think Paul  was also one of them. It’s about attention. And, um, you know, attention is such a vague, coral, terrible concept, but it is so useful. And if you look at it slightly differently from the brain mechanism of back-end is point of view, and you say, aha, it’s nothing else, but gain.  </p>



<p>Gyuri    00:41:03    And then David and  and several other people have produced beautiful results. That indeed is a gain mechanism that we can now explain with a little bit of acetylcholine and inhibition or another concept or another territory is running speed. What does running speed have to do with attention? I claim that they are the same thing. The attention is internalized running speed, running speed is a gain mechanism. And then more goes faster than it produces a gain through perhaps acetylcholine and through the same interference that the brain internalizes this, this initially environment dependent or body dependent function and release is the same as the recording to use or produce the same thing such as, uh, a gain modulation. And that’s what the psychologists call attention. So this is another example, I think when we can make inroads taking a, uh, a, uh, uh, lumping together ideas, and perhaps it can come up with a different formulation. This is what happened in physics and the atom is not dividable, but we still call it an atom. But with the new understanding, because now the atom is better defined,  </p>



<p>Paul    00:42:22    Needing new words, do we need new words? I mean, it does. Is it a recognizing of psychological terms that we need, or a discarding of them, you know,  </p>



<p>Gyuri    00:42:30    Either way it worked in, in the history, you know, in genetics, uh, you know, we are still talking about the same genes. The words have been different, but the meaning the definition have changed. Every science, every science in the history of physics, chemistry, genetics, computer science, they came up with their own defined vocabulary. Neuroscience hasn’t; neuroscience just takes it for granted that we are already given a vocabulary. And we have a roadmap. All we have to do is take those words and, uh, and, and, and, and identify them the mechanisms when I go to give a talk somewhere and I asked, and I show a James’ list, then everybody in the audience without an exception can identify with one of himself or herself. It’s one of those things it’s like, yeah, I’m going to dedicate my entire life to figure out attention, or I’m going to work on memory. I put it to work on the, on emotion. And so on as if those entities are really the ones that you will identify when you are starting poking the brain. But let me ask you,  </p>



<p>David    00:43:37    You can ask it. So, but, uh, I mean, that amplifies my concern about being, you know, carefully decomposing concepts into their constituent parts. Just like you raised the issue of Adam, right? We take it as a primitive that really is there smaller primitives inside there. Uh, but the, in the case of neuroscience where we do, of course, neuroscience does have the vocabulary for the neuro-biological parts list. Do you think we have that correctly identified? So you, I mean, you criticize, uh, often rightly that the cognitive sciences in psychology are kind of lame or using old vocabulary, but that makes a sort of implicit presupposition that neuroscience has its parts list correctly identified. And I’m not so sure that that’s true  </p>



<p>Paul    00:44:19    Things like action, potential,  </p>



<p>David    00:44:22    Take that. I mean, so we make assumption that let’s say, you know, the neuron doctrine, very successful idea is that really the representational or computational primitive in a brain, maybe it’s going to turn out in 10 years that it’s some weird ensemble that we don’t yet know. Maybe it’s seven neurons wired up a certain way. So we have a vocabulary for neurobiology, but we should, we assume that it is actually more successful. I’m not, I’m not so sure that I’m willing to buy that.  </p>



<p>Gyuri    00:44:49    Uh, you’re right, that, you know, this, this argument can go from very, from every level down to a deeper and deeper level. Uh, but it, it, it’s always the case that in order to understand one level, you have to understand something about the interaction of an upper and a lower level. I’m not saying, and you know, you, you try to put me in a corner and say that I’m trying to say, I’m not saying that you need quantum physics to explain the knee jerk, reflex. The knee-jerk can be explained it the way how I explained it to my mother-in-law, but we have to ground our observations with other observations. We seem to be independent because otherwise people will be double-dipping. So when you say we decompose and they don’t  progress, and you, your site, your own area, which I, I really respect. And I will come back to that, you know, by decomposing the, the, you know, you just gotta go and say, oh, I’m going to the brain to figure out language. No, you have to decompose that, those things.  </p>



<p>David    00:45:57    So it’s like a tension. These are complicated concepts with internal structure,  </p>



<p>Gyuri    00:46:01    But then it reminds me a little bit of the program of Hubel and Wiesel, because Hubel and Wiesel, as I said, you know, vision is a complicated thing. Let’s break it down to simple ingredients. Let’s just study the impact of horizontal and vertical lines on neurons and then edges, and then contours, and maybe a little color. And someday from this elementary things that we identified, we’ll put together the composition. And of course it didn’t work. And it, you know, I always wondered, you know, how many more monkeys, cats and how many neuroscientists are needed and how many more experiments to move forward. And the answer is that if the entire world in diet 6,000 neuroscientists in the world will do nothing, just the same kind of experiment in the same outside-in framework, then we wouldn’t get much smarter. Uh, so I wonder if that decomposition that is done without grounding is more than just an exercise. And my answer is, yes, it’s just an exercise. And your answer is that what I recommend is going to do the deeper is also an exercise. And this is what science is. You know, it’s an exercise and we’ll go to the sandwich problem a little bit later, but this is exactly what we do. We try to ground an unknown or less, more familiar  </p>



<p>David    00:47:30    Things, but I may look, so I respectfully disagree that the decomposition is just ad hoc. I think the point of it is that you take some idea, you know, whatever it is, attention, language, pick, pick your favorite psychological domain. We pick it apart, you know, like peel the onion, because the assumption is that one of those sub things is a more plausible way to link to the neuro-biological thing, right? So if you say what’s the brain basis of languages, I have no idea that that’s made up of dozens and dozens of sub routines, but if I can make it into something that could be realistically probed at the level of the cell, a circuit on ensemble, a column, God knows what don’t you think that that would actually then be at the granularity of linking hypothesis between the neuro-biological infrastructure and some part of the concept that we’re trying to get. So it’s not an ad hoc decomposition, it’s a decomposition with a motivation to find, to identify a linking hypothesis to the neurobiology. I mean, I think that’s the premise of the research program.  </p>



<p>Gyuri    00:48:35    Well, I think what the inside out framework suggest is that it’s on a safer ground when we compared his decomposed ideas and comfort them against brain mechanisms than without right mechanisms. And I can ask the question from you that you’ll composition or decomposition help you to ground your things by relating them to brain rhythms.  </p>



<p>David    00:49:06    That’s exactly  </p>



<p>Gyuri    00:49:08    That. But let me remind you that we went out and figured out and try to work on the system of brain rhythms and cross-frequency coupling without language in mind. So, you know, we have observed that these are regularities that are there probably to serve something important. Then we can talk about later if you want, why they are important and in general principles. And it happened to be a fertile ground for language,  </p>



<p>David    00:49:42    Right? It does this, it does this as a perfect example. So does this,  </p>



<p>Paul    00:49:46    The David, can you just actually just summarize like that linkage before, uh, you know, that the phoneme like still  </p>



<p>David    00:49:54    So we can take them? So, so there’s a, there’s an aspect of language. Comprehension is a complicated problem. It’s complicated from a point of view because you have the more or less continuous signal. And so as we’re having a conversation, there’s a physical wave form. It comes to you. And in the end, the remarkable thing of communication is that I send you a physical signal to your ear, and it ends up as an idea in your mind, which is cool. The fact that we can have a conversation is actually unbelievably impressive from a brain’s eye view, from a mind’s eye view. One of the problems you have to do is you have to actually, uh, this has been known for a long time, certainly, you know, for, for probably a hundred years, it’s been discussed. It’s certainly since the late forties, how do you actually chunk the information that’s coming continuously into usable, uh, bits of information that you can then work with?  </p>



<p>David    00:50:45    This has been a pain in the ass of a problem for a long, long time. And some years ago, you know, there were no functionally driven questions, right? Questions about how do you about this? How would you do this in a, in a few people, a community of us thought, well, here’s a really cool way to do it. So there’s this interesting thing of, you know, excitability cycles or oscillations in the brain. What would be if you actually found a link between what they’re doing and that particular problem, because it gives you a potential mechanism for chunking things at different timescales. And that’s a hypothesis that you can chunk things at different levels of sizes. You know, let’s say at the syllabic level or smaller levels, and then you can do experiments. Is it real? Is it, you know, some people think of totally epiphenomenal, I think they’re wrong.  </p>



<p>David    00:51:30    I think it’s real and important. You can begin to really try to parse it, part that problem. So there is a case where there’s a neuro-biological phenomenon, which turns out to be immensely useful for querying a functional aspect of language comprehension, but note that it’s because we knew the particular problem that had to be solved, naming the problem of segmenting it, continuous stream into smaller chunks of different temporal sizes, and that’s the case. And so the reason I liked that line of research is because it’s, to me a really interesting linking hypothesis between a part of language comprehension and a part of neurobiology, both of which are very well motivated. And then you can try to figure out how does it work? How do you manipulate it? You know, how do you actually cash it out? So that’s, that’s sort of the, and I think that’s an area about Yuri and I probably very much agree, but that particular issue, that’s not  </p>



<p>Gyuri    00:52:22    David. What other arguments you think you could use to convince your colleagues, that your segmentation that you have done is right. If you don’t go and link it to brain, it comes,  </p>



<p>David    00:52:38    There are many,  </p>



<p>Gyuri    00:52:39    That’s what are called grounding. Because once you start that argument, then you are on safer grounds.  </p>



<p>David    00:52:46    I mean, it’s a form of evidence that I happened to find compelling, but of course, let’s say you’re working. You’re somebody who works just on automatic speech recognition, or, you know, some natural language processing. There are psychophysical, uh, risk, uh, data that might be as compelling for you. So for instance, take experiments, psychophysical experiments that my colleague Oded Ghitza has done at BU. If you take a speech stream and you remove the cues that allow you to have some kind of chunking, then your intelligibility goes away. And if you put in signposts, that gives back some kind of rhythmicity in the signal, you get intelligibility back, right? So those are purely psychophysical data that are consistent with this. We happen to believe that it’s, we want to go to neuroscience because we’re interested in neuroscience, but it’s, but psychophysical and computational data are of course, in my mind of equal status. You’re right. That it gives you the, the biological data. It gives you extra reason to ground it that’s, uh, these independent forms of data are also legitimate and epistemologically at the same status. I would argue,  </p>



<p>Gyuri    00:53:52    David, let me push you in a corner now, please. So here’s a quote from you that takes to be explained our human cognitive faculties. So here are my questions. Do we know them? Do we know how many things we need to explain what authority made up these terms? Isn’t that a bit of arrogance by claiming that we know what we are, what we are looking for or what you should look for these days, you probably have noticed also that many papers introduce a problem by saying that in order to solve problem X, the brain must have this or that function. Where does this confidence come from? Brain evolution. Didn’t start out, generate a program where the end product is, should be the human level, cognitive faculties, instead of brains evolved to use actions, the learn the consequences of those actions as afforded by a particular environment.  </p>



<p>Gyuri    00:54:51    The brain is not interested in the true nature of the world. Instead its main occupation is help its host to survive and prosper in its niche. Do we know at all that the attention motivation and so on, these are entities. So when you look at the brain problem from a wider perspective, then these issues inevitably come up and then the start moving away from the historical aspect of brain research. We started out with the problem of how the mind works and this is what we have to figure out. And we work our way from the top to the bottom. I think Paul Cisek beautifully also explains why evolutionary concepts are so important as frameworks to understand, but a very complex function cannot be broken into simple pieces, but it can be put into an evolution context.  </p>



<p>David    00:55:46    Three bears just ran right by my window. Oh my black bears. They’re right there running right by my house. Oh, here comes the mind. You’re paying attention smokes. Yeah. Well there is no concept of attention according to Gyuri, so I can’t see them, but  </p>



<p>Gyuri    00:56:04    There’s a gain mechanism.  </p>



<p>David    00:56:06    I’m going to appeal to my gain that I saw there. They’re right next to my garage. It’s incredible. Three, three Cubs that are big and a big mom. Holy cow. Okay. Sorry. That’s very, that was very distracting. My visual periphery. Sorry about, okay. So that’s, uh, I don’t feel pushed in the corner at all by that because I think it’s a fair point and it’s exactly the point that I want to push back on with you, which is what, why are you confident in the neuro-biological primitives? I think that the concepts we use in the cognitive sciences are hypothesis. I mean, so I don’t think that there’s a particular arrogance to them. I think that there’s our hypothesis about the parts list of the mind just as Gall’s list was a hypothesis about the parts list of the mind, except that he made some weird, extra assumptions about phrenology.  </p>



<p>David    00:56:58    Although his list, I think, is quite fascinating and good, but our hypothesis does, those are hypothesis that people I think are willing, very likely to abandon. When we think about the scientific approach, it’s unlikely that people are willing to abandon them when we use this terminology, just to explain at the superficial level human behavior, because if you know, like I’m hungry, I’m going to meet my wife for lunch and you know, I’m very greedy. So I, you know, I’m going to invest in apple or whatever, but the parts list of the mind is negotiable. And what is the right granularity of analysis of that parts list is negotiable. I think those are empirical hypotheses. They can be tested neurobiologically, which is what we all want to do. Computationally psycho, physically, theoretically. So there’s I think a rich reason. So just like I’m claiming that you can’t be sure, you know, that maybe the neuron is the right level of analysis.  </p>



<p>David    00:57:54    Likewise, you’re correct. I can’t be sure that attention or language or word recognition is the right level of analysis. And I don’t think there’s, I think those are all up for debate. I think that everything is up for debate. I’m just wanting to point out that it kind of, uh, a decomposition of these concepts to the level of things that could be a linking hypothesis is what we’re trying to seek to make progress there. Right? So at the level of, let’s say neural dynamics, self-organized systems, that kind of stuff that we can measure and test and so on.  </p>



<p>Gyuri    00:58:25    So how would the decomposition would work in other areas of, uh, of, uh, cognitive sciences without grounding to the brain? And what comes to my mind is a large other area, which is not cognitive science, but psychiatry, no DSM-V, which is the popular and hated. The big book of, uh, my wife was a psychiatrist and many other psychiatrists is that it’s full with terms. And this terms were made in good faith by intelligent people and for a good purpose. So what is the Mac? What is the, the strategy for decomposition? Unless we go back to the brain and from the brain perspective, it will say, Hmm, no, this two or three or four diseases in fact has the same mechanism and perhaps the same substrate because they respond to the same treatment. For example, about 20 years ago, people started to have psychiatry started to use the drugs that have been used in epilepsy and took a long time to figure out or learn that, oh, the reason why it happens is because many of those psychiatric problems are due to a hyperexcitability that if you had the recording from the right part of the brain would be classified by an epileptologist as seizure.  </p>



<p>Gyuri    00:59:53    So that’s one example. So without going into the brain as a classifier and the final arbitrator, I don’t know whether we can make a, we can make progress of course, but in 2020, I think, you know, you are a prime example that you want to tie your work and research as much as possible to the brain rather than doing decomposition independent of the brain. Again,  </p>



<p>David    01:00:24    Paul, go ahead.  </p>



<p>Paul    01:00:25    Well, no, I, I don’t want to break the flow. I, um, you know, just on a personal note, I recently had Steve Grossberg on the show. And one of the things that we talked about was his sort of cyclic method of developing his theories and his models. Uh, and, and it, it struck me as I’ve had it all wrong. Uh, you know, thus far, you know, I’m slowly correcting myself as well. Everything’s a slow developmental process, but, you know, I have thought, uh, well, you know, like Yuri, I don’t know that you think this way, but like, I think, okay, we have all these brain mechanisms, like what could they do? And that’s sort of a bottom up approach. And then I’ve also like, sort of swim in the like, well, attention, you know, what, how could that relate to consciousness and how could these psychological terms relate?  </p>



<p>Paul    01:01:08    And then how could they relate to brain? But Steve, um, professes, too, and I believe him because he’s done it for a long time, he’s been very productive, uh, that he begins with the behavior. And he actually seeks out paradoxical behavioral findings and assimilates and synthesizes, a large body of behavioral work from psychological experiments and psychophysics experiments. And then from there spends about five years, he says thinking, uh, and that leads to design principles that then he can implement in models and then start thinking about the mechanistic implementation of these things. And because he starts with behavior, it doesn’t matter what the terms are for the psychological processes. Um, likewise it really doesn’t matter what the mechanisms are neuronally because, um, his claim is that he does it lets it stew for a long time and thinks about what sort of design principles could implement, something like that. Uh, and in fact that he originally started doing that before he knew anything about neuroscience and I presume about psychology,  </p>



<p>David    01:02:13    I’m sympathetic to that, right? So just to respond to specific to Steve Grossberg’s program, but this particular part of his research. And in fact, that is sort of the, um, a core part of the paper that, um, John Krakauer, Asif Ghazanfar, Alex Gomez-Marin and Malcolm MacIver and I wrote that is to say there is a sort of core of identifying well-characterized behaviors, hopefully behaviors that are really interesting to their creature, not like weird animal, not weird lab, things that are just highly artificial, but really trying to identify, you know, reviving the ideas of ethology. And so not forgetting the incredibly important contributions from, you know, von Frisch, Tinbergen, Lorenz, who really were extremely careful at describing what might be going on, being inspired by that and turning that into experiments in neuroscience. And so we are on record of saying, look, that aspect of extremely thoughtful characterization of behavior should take an equal ground in neuroscience. It should be, uh, should be revived as having this important, uh, role for stimulating research that’s. And so we, so I think I would be in favor of that kind of view of,  </p>



<p>Gyuri    01:03:26    So it’s a very interesting thing. You know, you just brought up ethology. I started out as an ethologist. I, I produced ethograms in cats and rats. And because that was the big thing at a time when the Nobel prize was given to Tinbergen, von Frisch and Lorenz, who was by the way, uh, uh, Hydaburg Heidi, Greenberg’s a mentor. Um, and the question in my mind is always that, why did it die? Why did ethology die? And the reason is simple because it never tied its systematic exploration to the brain. So here’s one example. I spent enormous amount of time as a graduate student, arguing back and forth with others that Pavlovian conditioning is a stimulus substitution, or it’s an epistemological behavior is the dog or cat or a rat is a proud to the loudspeaker because it is so stupid that it thinks it’s food or as the stimulus substitution predicts, or it is because it became a new goal as a result of the, of the contingencies.  </p>



<p>Gyuri    01:04:42    And this was the time when a term was introduced called auto shaping. Maybe you don’t even remember what it is, but basically what it is is that you put an animal into a box and you present a lever for example. And all the animal has to do is whenever the lever is presented, it just gets the food automatically. Doesn’t have to be a, except the lick, but animals can’t help but start pressing the lever. They shaped themselves to do what Skinner has shaped them to do; pigeons peck on the light source and so forth because they can’t help. This is how they are put together. The brain is put together that way: without behavior, without action, they learn, they never ever get anything. They have to behave in order to receive something. So the debate went on and on and on that one is the, the pecking behavior or pressing behavior or approaching the loudspeaker is a consummatory behavior or preparatory behavior. And this was unending. I put an electrode in the hippocampus and in an hour, the answer was clear because hippocampus theta, oscillations are classifying all behaviors into two major categories, preparatory or consummatory. And that was a theta behavior. That was the end of the debate. So I realized in order to make progress, instead of doing ethograms, I should start poking the brain.  </p>



<p>David    01:06:17    I mean, it’s true that, I mean, I, it’s an interesting story. I didn’t, I think you’re right. It’s interesting that why did you solidly and neuroimmunology not, not just flourish suddenly and maybe it was a focus on behavior, although the  </p>



<p>Paul    01:06:33    Wasn’t it behaviorism,  </p>



<p>David    01:06:36    But not really because behavior prior to that, it was, you know, behaviorism was  </p>



<p>Gyuri    01:06:41    Killed off in the 1970s.  </p>



<p>David    01:06:44    Behaviorism was effectively killed forever in 1959 and 1959. It was over, it was a,  </p>



<p>Paul    01:06:51    Is it because, um, we needed, we thought we needed much higher control over the experiments. And so everything came into the lab and ethology was discarded because of that,  </p>



<p>David    01:07:01    Maybe it’s a good, I mean, it’s so it does raise an interesting problem of that. We have currently in much of our research, right? Which is this tension between experimental control and naturalism, right? So on the one hand, the interesting, the things we would like to understand are the sort of natural class of behaviors that organisms have. And then we bring people into the lab and have them, you know, press a button for a Gabor patch, tilted to the right, which is, you know, like a little bit, but it’s hard to do experiments and naturalistic preps  </p>



<p>Gyuri    01:07:29    What killed behaviorism for me, or Skinnerians, is, uh, two things. One is, uh, the students of Skinner, Breland and Breland, who used to work in, uh, in, uh, in the hallway, have published a nice book called Misbehavior of Organisms as a response to Behavior of Organisms, which showed that it, when in many cases, animals just cannot be trained, no matter how many trials you are trying to do. And that led to Seligman’s prepared, contraprepared and unprepared categories; that there are certain rigidity in the, in the brain or in evolution that doesn’t allow you to train animals. For example, uh, a rat to rear on its hind legs to avoid the shock. This is just naturally incompatible. So the animal’s brain is unprepared for that. And then later on learned helplessness and all the others came about. So this totality or globality of ask me any behavior and I can shape it, or I give the brain two different stimuli and the brain can associate anything you want, was just not true. And it was very clear. So then the turn went back or the, the, the cycle turned back to the idea that you have to look at the brain, what are the constraints of the brain that allow things to happen and what things in the brain does not allow other things to happen?  </p>



<p>David    01:08:55    Yeah, I mean, from the perspective of this psychological sense in the cognitive sciences, that, you know, Skinner’s 1957 book, Verbal Behavior, was sort of supposed to be the apex of explaining, you know, complex human function. And in 1959 Chomsky wrote a review of Verbal Behavior that was effectively a kind of examination, just on logical grounds, of could this work in principle. And it was a very, it’s the ultimate reviewer. Number three, if you ever want to read something career ending, read Chomsky 1959, it’s very neutral. It’s very cold, clinical. And it also ended behaviorism in psychology. That was a, it was a one event for the law, for the cognitive sciences there. So that part didn’t work. Interestingly, of course, with respect to AI, that’s coming back, right? The notion that Gyuri criticizes, and that I also relentlessly dislike, this kind of tabula rasa approach is deeply ingrained in the AI approach to these problems. And I find it very surprising. It’s going away a little bit, but it’s sort of still,  </p>



<p>Gyuri    01:09:58    Well, if it’s going away to bring in sleep as an officer of a line, a helper and so on, but it’s basically a blank slate.  </p>



<p>Paul    01:10:08    No, but just, but there are priors in the form of the operations performed. So the classic example now is a convolutional neural net, right? Well, it’s still randomly initialized. Exactly.  </p>



<p>Gyuri    01:10:19    It’s still that  </p>



<p>Paul    01:10:20    The weights are, but, but there’s structure to the structure built into the network. I mean, I’m not saying it’s there yet. Now that AI is building in all of the things that it could, but it’s moving in that direction.  </p>



<p>Gyuri    01:10:32    So for the sake of argument, let’s suppose for a moment that the idea that the brain’s most important occupation or preoccupation is maintaining its own dynamic and everything comes secondary to that. If you take that, I would say important message of my book. Then where is the AI that starts out with that model where you say, oh, the first thing we have to do is to have a, a model that can have all the possibilities. It generates a realm of possibilities, what this system can absorb and code for rather than the other way round. And I don’t see it yet.  </p>



<p>Paul    01:11:18    No, but we don’t, but neuroscience doesn’t know the, uh, the, the right way, how to construct a realm of possibilities. So it would be hard for AI to code it in. Correct.  </p>



<p>Gyuri    01:11:30    Um, I would say we don’t know. Yes, but we are making some progress at least. So for example, my, uh, my ex postdoc, uh, Ken Harris showed that the spontaneous patterns in the auditory cortex give you the realm of possibilities. So when you present signals of natural signals, uh, artificial signals, they all fall into this realm of possibilities is no boundary violation. You cannot produce anything in the normal brain or in the human brain that is not already not there as a dynamic. That the most important thing for the brain is to keep that dynamic together. You know, there are so many things that the brain has to the brain networks have to deal with, and they are competing with each other as a stability and reliance and, and a perturbation, uh, sensitivity and so on. So the only way it can put together is to have this extraordinary, diverse conglomeration of dynamic.  </p>



<p>Gyuri    01:12:34    And the moment you change it, then you have a disease, but learning no matter how much we learn, the dynamic is not changing. So it is fundamentally different from the tabula rasa approach, where, or the AI approach, where the complexity of the network scales with the amount of experience my brain does, not your brain does not know an AI, you know, the shame or the problem is this bug called catastrophic interference. And they are various ways of dealing with it. Now sleep as a, as a saver is perhaps there, but catastrophic interference comes there because of the, the blank slate model, the brain solve that. And that’s why I’m saying that. I like to see a little, somebody will be thinking about to, to start building a simple thing that kind of blew up only 500 items, but at least start out with the a thousand possibilities rather than start out with zero.  </p>



<p>Paul    01:13:38    That’s interesting that hearkens back to the original symbolic AI days, where we were going to build in all the expert systems where you build, build in all the knowledge. I know it’s very different, but it does remind me of that since we’re on AI, uh, because this is a topic that I wanted to talk about anyway. Um, and, and you’re, you just sort of made the case for the inside out approach to building AI. And my question was going to be whether any of this matters for AI, uh, David, do you have a, um, a counter to, to that inside out approach to building AI? I’m actually,  </p>



<p>David    01:14:09    Uh, I’m, I’m, you know, we, we’ve taught you and I’ve talked about this before. I’m actually very sympathetic to your position on this. I think that’s, uh, I think that’s, uh, an, an AI that actually, uh, a kind of Neo AI that takes this position into account and evolution for that matter. And doesn’t, uh, it would be a very interesting new space. I’m super impressed by what AI systems can do. I think they’re amazing. And I think they’re about engineering. I’m very moved. I’m impressed. I don’t think they have anything to do with how minds and brains are organized. I enjoy, uh, reading about yet another network and yet another layer of convolutions and pooling. And I have the vaguest idea of what it has to do with anything other than it solves an engineering question, but not a question about how biology works or about how minds work. So,  </p>



<p>Paul    01:14:59    Um, is it intriguing to you though that, um, you know, for instance, convolutional neural networks, and I don’t mean to harp on this, it’s just that that’s where the state-of-the-art is. What’s what  </p>



<p>David    01:15:09    Your podcast is about. So you have to actually  </p>



<p>Paul    01:15:11    Be, well, no, well, I mean, I’m at CNN in particular, but, but that sort of structure, uh, explains a lot of the neurophysiological data seen in the ventral stream. What do you mean with the  </p>



<p>David    01:15:20    Explains? It explains, so, so let’s say the work of Dan Yamins and Jim DiCarlo, an amazingly elegant way to show that you can get at some layer discrimination between visual images. I just don’t think that that’s what vision is about.  </p>



<p>Paul    01:15:34    No, but when you look at the activity of the units, it matches fairly well with the activity of neurons. Uh, and so there’s, there’s not a complete disconnect there. I mean, th there’s no,  </p>



<p>David    01:15:45    You’ve got a certain level. No, I think so. This is actually an interesting conceptual point is up to a certain level of analysis. It’s the, those seem like potentially descriptively adequate systems. And those are the question is, are they explanatorily adequate? So they capture a good distinctions that at some layer of, you know, the inferior temporal cortex, you can make discriminations between things. And yes, when you probe at some layer down some layer of convolution, it looks a little bit like a receptive field. You can even make these cool, super stimuli by exploiting the properties of these DNNs. Uh, I’ll wait for the movie, you know, I’ll wait, I’ll wait for the movie, but at the moment, I’m, I actually rather am in favor of the approach there that, that Gyuri is arguing for, which is let’s take it into a very different direction, into a much more  direction.  </p>



<p>David    01:16:40    We do know about intrinsic dynamics that are just there, and it would be extremely interesting to see what space that provides and what kind of AI and yeah, I’m, I’m not a big fan of learning. I’m, uh, I’m probably way more nativist than you guys, but I’m impressed by what, you know, evolution and what genes carry and that they provide an operating system. That’s so rich that it provides you very clear boundary conditions of which like the work of Ken Harris that you already mentioned them as extremely interesting. Like you can’t go outside of your coordinate system and why would you, it would be very weird. I mean, you have a really amazing operating system in the brain, Microsoft brain or whatever, it’s a good, uh, so that is going to be your, the thing within which you work and it doesn’t, you don’t give a trillion learning trials of some arbitrary and, uh, biology is not the mother of all regressions, the daughter of evolution.  </p>



<p>Paul    01:17:37    Okay. So, um, before we go on, because there are some specific, um, claims that maybe we can go through here in a minute, but I want to ask what’s at stake in this, because, you know, th th there’s this issue, uh, I’m stepping outside of AI and back to our psychology, um, you know, high level implementation level psychology brain inside, out, outside in discussion. Um, does this matter issue in the long run, um, because science, you know, won’t science self-correct either way and progress and work it out regardless of our opinions of how to proceed, or, or is this conversation more about speeding up that progress so that we can see it in our lifetimes?  </p>



<p>Gyuri    01:18:17    So one level is practical. I can say that we have tried the outside in for a long time. And, um, we got to a point where we said the progress is slowing down. And so he can say, why did you try something else? And the inside out perhaps is, uh, is an alternative that will use new data, you, new ways of thinking and so on, and maybe feel run into the same problem sooner or later, it’s hard to predict. So whatever the reason it is, uh, uh, I think the time is right for now, there are deeper issues. The deeper issues is that the, the, the outside in inevitably produces concepts that are difficult to digest. In other words, in order to go from the perception of the world to action, you need some funny thing in the middle that you can call, you know, homunculus, consciousness, a decision-maker or anything.  </p>



<p>Gyuri    01:19:18    And, and that is very difficult to, to bypass. And as I try to illustrate it in my book, that the way how the outside in approach works is that you put the experiment in a privileged situation, because he’s the one, or she’s the one who is presenting the world at the same time recordings and interpreting the signals from the brain. So she or he has access to both, but the brain does not the neurons that you are recording from, have no clue what happens out there that has to be grounded and grounding is the action. So you need a reader. You need a disciplined way of figuring out whether the correlation you find is actually utilized by the brain. So in the lab, the, not necessarily the mandatory EDU, but at least it’s the desire is that when we find an, a correlation and we have to show that those patterns that we link to behavior actually are read by downstream readers.  </p>



<p>Gyuri    01:20:30    Now, this brings me back a little bit to AI, and especially the robotics or the brain machine interface, paradoxically, or not the brain machine interface literature, or the people who are working on it, subconsciously realize how important this is because they had a reader, the damn cursor, the cursor have to move up or down or somewhere. So there’s no negotiation what you have to do, because every single thing that you feed in from the brain has to accomplish a goal. It has to be interpreted by the reader. And so this is the kind of attitude that said, if you go and start doing a building of the system from the point of, of readers, that it’s much, much better off than studying from outside and from a higher level and see how the brain responds and trying to interpret you as the experiment of those responses without having a grounding mechanism. So  </p>



<p>Paul    01:21:36    It’s not the psychological process you’re trying to, that’s not the object that you’re trying to build up a mechanism for. It is the actual,  </p>



<p>David    01:21:44    So I, I think, so your question was, what are the stakes, does this matter? Or is this, you know, uh, and there’s two answers. One is, one is, uh, with Yuri. I agree that there are practical concerns. I mean, what are the next steps to advance our agenda and asking what are the hardest questions of science? I think the stakes are unbelievably high. Um, the stakes are what are, what is the parts list of the mind and brain that is the, what could be a more important list. I want to know, just like physicists, want to know what are the elementary particles of how the universe is organized. I want to know what are the elementary particles of how minds and brains are organized.  </p>



<p>Paul    01:22:26    Do you think there’s risk that we won’t get there, or just that we’ll get there much, much more slowly if we do it the wrong way?  </p>



<p>David    01:22:33    Uh, well, I’m not a that let’s say, is there a field called the F you know, there’s a field called history of science. I don’t know if there’s a futurism of science. I’m not a science futurist, I’m an optimist. I think that, you know, there is a fact of the matter, I’m a naive realist. I believe the world is as it is, and we can figure it out. And I would like to use the best, put our best foot forward. Right. And so I think the, uh, slightly too casual dismissal of the contribution of the psychological sciences comes at a cost that I don’t want to incur. I mean, that is the main argument of the, of the, uh, paper that Federico and I wrote in response to your paper and book, that is it’s, it’s too quickly dismissing, uh, the conceptual architecture of cognition, because of course, absolutely true.  </p>



<p>David    01:23:20    There’s a lot of lame and shitty work. That’s legitimate and it’s annoying, but there’s also progress and good work. And we shouldn’t, as we said in, don’t throw the baby out with the bad science bathwater. That is the, there, these are realistic, well substantiated hypothesis about elementary, the elementary particles of the mind and brain. And they should have the same kind of status as our hypothesis about whether the neuron doctrine was right, or whether everything, you know, should be situated at the level of, you know, a dendritic spine or whether we should be looking in jeans. Let me make one more reason. So I think the stakes are really high and really exciting and important. We want to know how the basics work. This is the most fascinating field. One thing I want to disagree with w w or partially disagree with the worry is that the arbiter of everything is action.  </p>



<p>David    01:24:11    I think that’s certainly part of it, but I would submit that one intermediate step, maybe a prior step to that is actually storage as one of the remarkable things we have, you know, in this sort of pre-configured or preformed, uh, nervous system operating system is the ability to store stuff. Some of it is already there, and some of it we have to put in, and that forms the basis for many things sometimes, including action. So I think, uh, one of the deepest questions that we owe in the neurosciences in the cognitive sciences is how does storage actually work? You know, it’s extremely difficult. And I think that the answers we have at the moment strike me as a very unsatisfying, like, oh, it’s the pattern of synaptic forms or something like that. That’s, you know, maybe, or maybe not, but it seems not. Uh, uh, so I think that’s one of the things for the future to, to dig in deep, because it’s going to lie at the basis of the links between how brains work and how minds work, which I take to be more or less the same thing. Right? So there are different coins of the inquiry.  </p>



<p>Gyuri    01:25:15    Well, I, I, we work on memory, but memory and storage is totally useless without action. There’s no need there’s no. If, if, if, if something in the brain is not implemented and the implementation can happen only through action, then it doesn’t exist.  </p>



<p>David    01:25:32    But as, as action for you a larger, just to word, that thought thought, okay. So that’s what I mean. So, so it’s a larger,  </p>



<p>Gyuri    01:25:42    I was going to  </p>



<p>David    01:25:43    Ask if memory is  </p>



<p>Gyuri    01:25:45    Memories of the production too. So, you know, what is the largest stake? The larger agenda to me, he, the inside out versus outside in that if the build our logical system from the inside out, maybe the middle will disappear. And those problems that, uh, a large baggage of the outside in such as the little man, such as decision-making and all these very difficult concepts you be addressed in a total different light.  </p>



<p>David    01:26:17    Do you have an example how that would work?  </p>



<p>Gyuri    01:26:19    Well, the example is the way how I viewed the whole thing differently. So in chapter three, I discussed at length corollary discharge, the difference between perception action and action perception. So the, this asymmetry occurs because every single time the brain sends an output to the actuators or a thought, it always signals back to what we call sensory part. Uh, and this is, uh, this is, this is the key moment because once the brain figures out and learns how to disengage from the muscles, the same mechanisms can be used for signaling about what if scenarios, what happens, what would happen if I would have made this action without actually doing that. And this is called thinking. So starting from the action and going back way,  </p>



<p>David    01:27:22    What are the elements of that? I mean, I, I’m all on board with corollary discharge, and in fact, it’s very close, although not identical cousin, that you have internal forward models that have not just corollary discharge, but  </p>



<p>Gyuri    01:27:36    No forward model is a variation of the  a down to earth mechanism.  </p>



<p>David    01:27:42    There are the elements of that red. So when that becomes thoughts, I mean, the, the, the, those are, so what are the conceptual or representational primitives of that corollary discharge?  </p>



<p>Gyuri    01:27:52    So we can start with evolution and look how this works in the cricket. And then we see how this basic mechanism is being in getting larger and larger, larger. And I give a separate examples in my book is that even emotions, uh, explainable by corollary discharge, I, uh, have a chapter that you not the chapter, but the paragraph at least there, that you didn’t criticize the language is, is, uh, probably based on corollary discharge, the thought itself. The thought process is based on corollary discharge or this mechanism or something like that.  </p>



<p>David    01:28:33    Yeah. I mean, the reason I didn’t criticize that party, so I’m actually very much in a huge, well, it’s not a notion of fandom. I think that, that, there’s something extremely useful about that concept because I myself have done a series of experiments actually on how corollary discharge plays a key role in speech production. And the question, whether it scales up there, I know what the primitives are. The question is whether it scales up or scales sideways to other aspects of thought, right. And that’s a more difficult, so that, yeah,  </p>



<p>Gyuri    01:29:01    I agree with that, but this is what I tried to scale up as an existing mechanism, as something that inherited from a phylogeny and that can be used and exploited in multiple.  </p>



<p>David    01:29:12    Yeah, no, that’s an example of where, where I think they, there’s a really cool mechanism. I think for the first time, perhaps I’ve seen in the work of bounced mob, the physicist that talking about that because of eye movement control, right. So you have to have a copy sent into the future in a quite explicit paragraph in 1880s or something like that about, and then later in the work of  for instance, right.  </p>



<p>Paul    01:29:39    I just want to take a time out and make sure because we’re, we’re, I’m coming to, you know, time, I suppose, just, I just wanna make sure there’s not something that you want to make sure that,  </p>



<p>David    01:29:48    But I want to find a while you were so something intelligent. I want to find, I have, I want to find a quote to really nail him to the,  </p>



<p>Paul    01:29:57    I took out, he’s pulling out the book. I took some notes  </p>



<p>David    01:30:00    Here to really, you know,  </p>



<p>Paul    01:30:02    Did you want to respond to that?  </p>



<p>Gyuri    01:30:04    Well, uh, I’d like to, I planned to say something and I’d like to say no, that it’s an interesting thing that we have this debate and about the origin of these words. And, uh, it would w you may wonder why this is so natural that we have this tendency or his urge to come up with words and terms. And the, the way I try to formulate this is something like this, that every animal semantic information is derived from personal experiences from episodic memories. The first time you see a dog is an episode, it’s your personal experience, but you see when you come across many dogs, the spacial temporal conditions of those specific episodes are stripped off. And what is left is the semantic entity of the dog. So learning semantic knowledge typically develops from ego spell equals specific individual interactions. This applies to all animals.  </p>



<p>Gyuri    01:31:04    However, and this is a funny part in humans. This process is cut short. We have externalized, not, we evolution externalize a great deal of brain functions through artifacts and especially through language. So now we can learn exquisite semantic knowledge by absorption, not via episodic memory that is learning from others, who name things for us. We learn about giraffes tigers, also about angels, Santa Claus and things like COVID NASDAQ index and galaxies. And you can go on now, where does this lead to us? This is a tremendous advantage, but it comes at a cost that we accept the existence of these terms without ever experiencing or questioning them when these terms are used, especially by authorities, they become real, but you may wonder, are they really real? It is so easy to say that the reason we understand something is because the brain processes information, or we can say we sense time and space, because we have place cells and time cells in the hippocampus.  </p>



<p>Gyuri    01:32:16    So these are the statements I have problem with. And I illustrate it in various ways. And I have a cartoon, um, at the beginning, I think in chapter two, that the prefrontal cortex has already at least 200 terms that have been associated one way or another with some terms. So the reason why we tend to name things is because we have this urge, because we think that once we have a word for it, that explains. So the crux of the problem is that we use words and the terms to explains, to explain things that have to be explained. This is what I call the . And this is I think something that David picked on also, and there is I think, a fundamental agreement that there should two aspects shouldn’t be used interchangeably, but how we get away from it is the big issue.  </p>



<p>Paul    01:33:20    But isn’t part of your agenda to create new vocabulary based on neural principle,  </p>



<p>Gyuri    01:33:26    You grounded vocabulary. I don’t necessarily need new words. If, if for whatever reason a new word will clarify things better, let’s go for it. But, you know, I’m, I’m happy to work with the word interneuron that was created a long time ago, even though now that we know that many garbage of neurons have a long projection, but then we refer to . We already define it. We know exactly what we are talking.  </p>



<p>Paul    01:33:52    I’m no language expert, but you got, well, we do have at least one language expert among us. Uh, the way that new words are created is not someone doesn’t decide to create a new word. And then it becomes a new word. It’s someone, it gets, it gets something sticks. Yeah.  </p>



<p>David    01:34:08    Yeah. But I think, I mean, what, what you were saying, we don’t need to invent new words that we can, but we it’s the, it’s the scope of the concepts that our word, you know, it’s the reference or the, you know, what is the particular meaning of a word that’s at stake when we use it in a scientific context? I mean, so I think with respect to the explanation question, so I want to just narrowly focus on the question of the relationship between cognition and the brain, if that is the focus, uh, and, and one of the core parts of neuroscience, then I guess, you know, URI points out very early in his book, which I have underlined right here. Uh, the, um, the thing to be explained should be the activities of the brain, not the invented terms and a, and that’s where in, I think we, we, that that’s where that’s the implementation and I will chauvinism that is, I think what has to be explained is, as importantly, is actually the inventive terms on the view that the invented terms are just as carefully submitted to scientific analysis research, empirical verification.  </p>



<p>David    01:35:20    That is the, the terms that we try to understand, even something as, as, uh, offensive as attention and can be extremely, you know, can be subjected to the same level of scientific scrutiny and empirical scrutiny as biology. And they are ultimately, if we’re trying to understand aspects of mental function, then those are the things that are the, and that as well, not trust, um, the activities of the brain. And that is I think, where re so I would submit that you, you must still have some of, I don’t know if we should call it outside in, but that the, that level of description needs to be sustained just as the level of description of the neuroscience needs to be sustained. And I think that, and, and that the arrow of information can not just flow from the characterization of the activity of the brain in one direction of the activity of, you know, psychology or cognition that, that, that I would feel strongly about.  </p>



<p>Paul    01:36:20    So you already mentioned that his hope and, um, he’s optimistic that the middle will disappear. Um, maybe you both, I don’t know if you both want to comment, but is there a middle and, you know, if so, will it disappear or will we, will it disappear? And if, if there’s not a middle, I suppose that means, you know, that the emergent properties that are implied, um, and described when we use psychological terms will forever be separate from the emergent properties that we describe, uh, when we use implementation level terms. So is there, is there a middle, or will it disappear?  </p>



<p>David    01:36:56    I’m not sure I understand what the middle is at the moment cruelly. Uh, so w what’s the, what’s the notion for  </p>



<p>Gyuri    01:37:03    The notion of the middle is that in order to go from the outside in and to translate what the world is telling the brain to action, that has to be the logic goes like this brains are created to learn about the world that are thinks out there that are good or bad. You have to evaluate, and you have to choose good. And once you make the decision, you can act. So this is this serial process, which you’ve got called perception. Decision-making an actual cycle. I put it the other way around. I said, there is a action perception cycles, but it’s not perception without action. That’s a big claim. And I try to make it clear why I made it. So the middle is there because it has names question, whether those names really refer to something or not, and is the decision-making process is a vague idea. And that is present and omnipotent in a single neuron, because an action potential, you can conceive it as a decision, and if you make it so ubiquitous, then it’s no longer capturing those things that philosophers and maybe neuroscientists were talking about. So this is what I’m saying, that with that kind of thing, it may go away. And I hope that if we approach the brain from inside out, we may build up a different conceptual system, perhaps with different words and different problems to attack than the traditional outside.  </p>



<p>David    01:38:48    I mean, I guess what you owe them is a sort of, I mean, this is a way to answer Jon Krakauer question, right? I mean, what you would owe is to say, I have a new conceptualization maybe with new or modulating vocabulary that actually can be an account of some mentalist phenomenon. That’s Jon Krakauer, his challenge, right? I mean, how would you end up using the infrastructure or conceptual structure of vocabulary of the neurosciences then account for the psychological phenomenon? So you, you could tell a story that way, but you still need to sort of figure out what is that chain of argumentation. In that case,  </p>



<p>Paul    01:39:27    I have a feeling that Jon Krakauer would, even if that were to occur, he would maintain that the, the term, the psychological processes are on a different level. And in the vocabulary used, are describing different, uh, properties of the phenomenon.  </p>



<p>David    01:39:41    Again, are we talking about the scientific use of the terminology or the informal use of the vocabulary? I, you know, we will not go away from that, right? We were because they are psychologically useful concepts like imagination, greed will shelf or useful. That’s how we attribute. That’s sort of the modern computational theory of mind. That is your belief system, right. Or knowledge system is that kind of terminology. And if you get rid of that, uh, what will you put in it’s place to have a conversation about how we’re, you know, about how we interact and  </p>



<p>Paul    01:40:22    You don’t want to get rid of it, right? You don’t want to get rid of it.  </p>



<p>Gyuri    01:40:26    We need two things. One is to realize that these are conversational pieces. The second one is to try to justify them. The problem David tried to criticize has been criticized even more so by my psychiatrist, wiser one, how do you derive from your sodium channels and spines ever depression or even tinnitus? I had a wonderful a couple of years ago, a rich person from Monte-Carlo invited a group of, uh, tinnitus experts. And you’re a scientist who had no knowledge whatsoever about tinnitus. And we were in prison together for a week. And we had to talk about how to solve this problem. And, uh, you know, when you confront yourself with something like this or depression or schizophrenia and said, well, how many years of my approach at this elementary level will take to get an answer? And of course, you know, I can, I have to tell my wife that, you know, we keep conversation about the words and, and modified as the symptoms.  </p>



<p>Gyuri    01:41:37    And so allowed them, don’t wait for neuroscience because then you will have to wait for a long period of time, but it doesn’t mean that’s hopeless. We are working very hard. We are making inroads in various issues, and perhaps we can build up someday a vocabulary or a symptomatology or a, uh, a drug discovery programs based on brain rhythms. Every single psychiatric disease is associated with a different constellation of problems of rhythms. So what if I can find a drug that, or work on a drug that works on the thalamocortical alpha, or another one that affects sleep spindles in certain segments of the brain, these are neuroscience questions. And all of a sudden that kind of drug treatment based on the treatment of the rhythms will fix, let’s say, uh, one disease or several diseases simultaneously. So this is the kind of far-reaching consequences of the inside out program.  </p>



<p>David    01:42:46    Is it really inside out? I just don’t see that. I just don’t. I disagree with you on that. I mean, that’s because you started this with the very interesting point that, you know,  </p>



<p>Gyuri    01:42:54    Well, inside out in a way that you take the brain as your primary target and say, oh, let’s use the, the urinal mechanism as a test bed to invent a new drug, right.  </p>



<p>David    01:43:06    That you’re, you’re finding what you’re saying is, is, you know, the, the really the alluring possibility is that you find, uh, a taxonomy of, for instance, in this case, let’s say psychiatric disorders that have shared underlying things, let’s say because of rhythm changes in brain rhythms or dysrhythmias, or a arrhythmias for them. But that’s because of the presupposition that there would be underlyingly, conceptually similarities in the things ultimately underlying the pathology. I mean, that’s a very strong, theoretical position that you’re bringing to that task. I mean, that’s this outside in, as it gets, I mean, you have a clear theory.  </p>



<p>Gyuri    01:43:43    No, the outside, it gives you the , there are some five full with a lot of things. What I’m saying is that that DSM-V could be grounded by brain mechanisms and maybe simplified, uh, much more effectively than, uh, than putting 500 psychiatrists about the table and negotiate  </p>



<p>David    01:44:06    That’s. That must be true on, on many Reaper, many reasons. One, just to push your button on one more thing, just because, you know, I was looking through some of my notes into that, that came to my mind. So there’s a, in your chapter, that for me was very important was just the paper that the chapter on space and time, I think that’s very near and dear to concerns. I have. Um, you say somewhere where I think it’s very important, cause it’s also germane to my own particular research interests. I was paying close attention. You, uh, you say the hippocampus system may be responsible for constructing sequences of information chunks. Well, chunk content is encoded and retrieved from the neocortex. And that seems to me a very reasonable hypothesis and worth, you know, digging deep into the critical question for me is the word content, what is the chunk content? And how would you actually ask that question and go about it from, so I have a clear way of how I would go about it from the putative outside in way, but I want to know how you would go about it because it’s a little bit mystifying to me.  </p>



<p>Gyuri    01:45:14    Well, the content is a short list of things that could be simplified in our world, by the animal, running through a maze and item one is corridor one or the beginning of corridor one, and the second one is somewhere else. And so on. And these segments are concatenated into a sequential order, a firing of the neurons. So when I have a sequential activity, the question is whether those euros actually represent space, or they are pointers to the neocortex where the world is represented or is mapped. And the hippocampal sequence just helps to link together these contents into an episode. So this is an example where I would say it’s experimentally relatively easy to put your finger on what is the potential contact, because that could be measured in centimeters, or it could be measured or exemplified with corridors. I understand what you’re asking is when it comes to a more complicated issue that you would like to know how to break down the content. And my answer is always that, you know, you have to have a simple situation such as an animal running through corridors, and then we can go from that.  </p>



<p>David    01:46:43    Um, yeah, I mean, I certainly see you have the way of operationalizing the question for a very clear in this case. So sort of an animal prep and I guess the kind of you’re you’re right to point out, I would be interested in what content means in a, in a more, in a way of what I have actually as stored content, right? So I have a, I have a mental representation of the episode of this discussion, and I know that, you know, you were here in Paul’s here, I am here, and these are chunks of information, presumably neocortical and I want to know, well, how are those things done? And I can, then you have a story for how to actually connect those through hippocampal mechanisms into sequences. We start talking about this, we talked about that and we talked about the other thing, but for me, the, the cosmos connects us. The crucial thing is actually the original instantiation of the parameters of this thing like that. I have  </p>



<p>Gyuri    01:47:37    Very, very difficult to raise in, in, in, I forget which paper you had, this review paper that you wrote alone about the problem of alignment and mapping. And indeed, this is an interesting thing because a mapping of course, is the big preoccupation and the hippocampus research and the hippocampal map assumes that somehow space is mapped onto a hippocampus structure and its mechanisms to serve, to represent space. The problem with this approach is that space cannot exert any effect, any effect on anything, because space is a concept. Only things can affect that physics. We have no space sensors in our bodies or in the brain, no matter how attractive it is to explain hippocampal function in terms of space, in reality, the examining relationships between things and objects and for convenience, we refer to them as space, but things are not in space. Space is the things themselves.  </p>



<p>Gyuri    01:48:43    Now, this is something that we debate among ourselves, but I think it somehow relates to what you are saying about the relationship between the primitives and the big concepts of language is because the big concepts making up language, the language is not this, but it is the primitives themselves in a particular constellation. And so when you are taking it apart and filling it with new content and tying it to brain mechanisms, this is what you call alignment. And it’s resonates with me as well. And I think we already clarified that. I don’t think that you have to, you know, you go blindly in a physiological experiment and then automatically some people come out, but you go in with a lot of discipline or naive primitives. And this is what you are trying to find, whether it matches to brain mechanisms or not.  </p>



<p>David    01:49:43    I mean, this is sort of what, what Federico and I would, you know, what we’re we we’re saying, ultimately, the day-to-day practice of these experiments is an abductive process. And that is you have some kind of intuitive notion of what I’m looking for. Then you’re really go into the nitty-gritty. Then you refine the concepts because he was sort of, you don’t do it purely inductively. Cause that has just no,  </p>



<p>Gyuri    01:50:03    Exactly. So this is what I said, that there are two major parts of the brains organization. One is what I call the good enough brain and the good enough brain is there for us to do what you just said, that under all conditions, we have an answer that is always a preconception. That is the brain always knows the answer. I cannot show you anything in this world that you will say, Hmm, it doesn’t exist because you will automatically say, oh, this is something like your brain immediately generalizes, it interprets it. The brain always interprets. And this interpretation processes, of course, what goes into the everyday business. When we go into the lab and do experiments that we interpreted, and then it requires a particular eye who’s who sees it differently than others, that this is a little bit different than what they are used to. And you know, this is not a “Eureka”, but the, uh, uh, Feynman would say the most important moment in the scientist’s life and says, Hmm, it’s funny.  </p>



<p>Paul    01:51:06    I thought that was Asimov. Was that Feynman?  </p>



<p>Gyuri    01:51:09    I thought it was Feynman, from the book “Surely You’re Joking, Mr. Feynman” or something.  </p>



<p>Paul    01:51:15    And he might’ve been quoting us. I don’t know.  </p>



<p>Gyuri    01:51:18    I don’t have a stake in it, but they, I think I agree with the principle that indeed that’s what we though, because that’s the discovery process that all of a sudden our preconceptions are confronted with something. And this is, I think the, what based in neuroscience should do now is taking those concepts that we inherited and confront them and see if we get some surprises here that,  </p>



<p>David    01:51:43    I mean, if you didn’t have a preconception going and you would never have the surprise moment, it’s because you approach it with a preconception, you say, huh, I wonder what that’s all about. Right. Otherwise it would be just description, description, description, stamp, collecting. It would just be stuff  </p>



<p>Gyuri    01:51:59    That also goes in, in neurosciences days, right? Yeah.  </p>



<p>Paul    01:52:04    Yeah. I wanted to spend more time on abduction because I think, um, I really enjoyed that, that later piece in your article, David, but we gotta go. And, uh, this is always the risk you run when you have a couple of people who are almost as smart as Franz Joseph Gall, but not quite, um, you know, we could clear, we could just go on and anyway, but, um, I, I hope that we move the needle and at least gave people food for thought. And you know, of course I’ll, I’ll point people to, um, both your, uh, papers that, that series of articles and the introductory article and, and your, uh, your book, which is again, um, wonderfully done. So thank you for the book and thanks for your  </p>



<p>David    01:52:45    Said at the very beginning.  </p>



<p>Paul    01:52:45    I think it’s, I hope you can say something, but the just it’s really just unbelievably useful to have debates go back and forth. Right. I mean, even just because, you know, both of you already had the experience I’ve had to experience with the students and trainees who just, they are uncomfortable about debate now, and it’s exactly the opposite that you should have, right. To serve say, well, I just, I’m just not understanding. I need to look at it from different and that’s kind of fun. It’s engaging. And it’s, I think it’s part of our sort of professorial responsibility to say that we, we get joy from that. But people debate into, in a different way these days, it seems like more name calling and definitive statements rather than I think this, therefore that it is you are this because that, you know, so it’s, uh,  </p>



<p>David    01:53:31    Well that’s okay, but that’s kind of  </p>



<p>Paul    01:53:32    Lame, it’s lame, but it’s, I think that that might be a reason why people are less. I don’t know. I don’t really know.  </p>



<p>Gyuri    01:53:39    The other thing I think is the lack of time, no, discussing a paper for me and for my group 30 years ago, it took a day. Now we are discussing two papers in a lab meeting. So here’s the deal, David, we will pick a paper of yours and we will invite you to present it. Yes, that’s a good idea. My group is a little more vicious than the average group. And so they’re probably like  </p>



<p>David    01:54:15    That. They’re probably like my  my lab or my lab means good. Very vigorous. It’s fun. No, that’s exactly. I would love to think  </p>



<p>Paul    01:54:23    So. Anyway guys, I’m sorry. We ran out of time. Thank you so much for the fascinating conversation and you know, hopefully maybe we can do it again sometime who knows.  </p>



<p>Gyuri    01:54:30    Thank you so much for organizing this and thank you for the outside comments and thank you, David, for your effort and your kindness and your criticism.  </p>



<p>David    01:54:39    Thanks. Thanks Gyuri for tolerating me. Thanks Paul for organizing it. And I look forward always to more. It’s great fun and it’s important.  </p>



<p>Paul    01:55:00    Brain inspired is a production of me and you don’t do advertisements. You can support the show through Patreon for a trifling amount and get access to the full versions of all the episodes. Plus bonus episodes that focus more on the cultural side, but still have science go to braininspired.co and find the red Patreon button there to get in touch with me, email Paul@braininspired.co. The music you hear is by the new year. Find them at thenewyear.net. Thank you for your support. See you next time.  </p>

</div></div>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/084-Buzsaki-and-Poeppel-public.mp3" length="111678964"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
















David, Gyuri, and I discuss the issues they argue for in their back and forth commentaries about the importance of neuroscience and psychology, or implementation-level and computational-level, to advance our understanding of brains and minds – and the names we give to the things we study. Gyuri believes it’s time we use what we know and discover about brain mechanisms to better describe the psychological concepts we refer to as explanations for minds; David believes the psychological concepts are constantly being refined and are just as valid as objects of study to understand minds. They both agree these are important and enjoyable topics to debate.Also, special guest questions from Paul Cisek and John Krakauer.



Related:







Buzsáki lab; Poeppel labTwitter: @davidpoeppel.The papers we discuss or mention:Calling Names by Christophe BernardThe Brain–Cognitive Behavior Problem: A Retrospective by György Buzsáki.Against the Epistemological Primacy of the Hardware: The Brain from Inside Out, Turned Upside Down by David Poeppel.Books:The Brain from Inside Out by György Buzsáki.The Cognitive Neurosciences (edited by David Poeppel et al).



Timeline:



0:00 – Intro5:31 – Skip intro8:42 – Gyuri and David summaries25:45 – Guest questions36:25 – Gyuri new language49:41 – Language and oscillations53:52 – Do we know what cognitive functions we’re looking for?58:25 – Psychiatry1:00:25 – Steve Grossberg approach1:02:12 – Neuroethology1:09:08 – A...]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-buzsaki-poeppel-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:56:01</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 083 Jane Wang: Evolving Altruism in AI]]>
                </title>
                <pubDate>Sat, 05 Sep 2020 10:18:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-083-jane-wang-evolving-altruism-in-ai</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-083-jane-wang-evolving-altruism-in-ai</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/09/art-wang-01.jpg" alt="" class="wp-image-1101" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/JaneW7.jpg" alt="" class="wp-image-1102" width="225" height="256" /></div>



<p>Jane and I discuss the relationship between AI and neuroscience (cognitive science, etc), from her perspective at Deepmind after a career researching natural intelligence. We also talk about her meta-reinforcement learning work that connects deep reinforcement learning with known brain circuitry and processes, and finally we talk about her recent work using evolutionary strategies to develop altruism and cooperation among the agents in a multi-agent reinforcement learning environment.</p>



<p>Related:</p>



<ul><li><a href="http://www.janexwang.com/">Jane’s website</a>.</li><li>Twitter: <a href="http://www.twitter.com/janexwang/">@janexwang</a>. </li><li>The papers we discuss or mention:<ul><li><a href="https://arxiv.org/abs/1611.05763">Learning to reinforcement learn.</a></li><li>Blog post with a link to the paper: <a href="https://deepmind.com/blog/article/prefrontal-cortex-meta-reinforcement-learning-system">Prefrontal cortex as a meta-reinforcement learning system</a>.</li><li><a href="https://arxiv.org/abs/2007.03750">Deep Reinforcement Learning and its Neuroscientific Implications</a></li><li><a href="https://arxiv.org/pdf/1811.05931.pdf">Evolving Intrinsic Motivations for Altruistic Behavior</a>.</li></ul></li><li>Books she recommended:<ul><li><a href="https://www.amazon.com/gp/product/0525558616/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0525558616&amp;linkId=cd8104ff9d8b83093ed20c46eaeed581">Human Compatible: AI and the Problem of Control</a>, by Stuart Russell:</li><li><a href="https://www.amazon.com/gp/product/1250118360/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1250118360&amp;linkId=09cd942e2af91bea8a6c1c30d9a77026">Algorithms to Live By</a>, by Brian Christian and <a href="https://braininspired.co/podcast/56/">Tom Griffiths</a>.</li></ul></li></ul>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img src="https://braininspired.co/wp-content/uploads/2020/09/humancompatible.jpg" alt="" class="wp-image-1103" /></li><li class="blocks-gallery-item"><img src="https://braininspired.co/wp-content/uploads/2019/12/41QotnHdoL._SL250_.jpg" alt="" class="wp-image-935" /></li></ul>



<p>Timeline:</p>



<p>0:00 - Intro<br />3:36 - Skip Intro<br />4:45 - Transition to Deepmind<br />19:56 - Changing perspectives on neuroscience<br />24:49 - Is neuroscience useful for AI?<br />33:11 - Is deep learning hitting a wall?<br />35:57 - Meta-reinforcement learning<br />52:00 - Altruism in multi-agent RL</p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Jane and I discuss the relationship between AI and neuroscience (cognitive science, etc), from her perspective at Deepmind after a career researching natural intelligence. We also talk about her meta-reinforcement learning work that connects deep reinforcement learning with known brain circuitry and processes, and finally we talk about her recent work using evolutionary strategies to develop altruism and cooperation among the agents in a multi-agent reinforcement learning environment.



Related:



Jane’s website.Twitter: @janexwang. The papers we discuss or mention:Learning to reinforcement learn.Blog post with a link to the paper: Prefrontal cortex as a meta-reinforcement learning system.Deep Reinforcement Learning and its Neuroscientific ImplicationsEvolving Intrinsic Motivations for Altruistic Behavior.Books she recommended:Human Compatible: AI and the Problem of Control, by Stuart Russell:Algorithms to Live By, by Brian Christian and Tom Griffiths.







Timeline:



0:00 - Intro3:36 - Skip Intro4:45 - Transition to Deepmind19:56 - Changing perspectives on neuroscience24:49 - Is neuroscience useful for AI?33:11 - Is deep learning hitting a wall?35:57 - Meta-reinforcement learning52:00 - Altruism in multi-agent RL
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 083 Jane Wang: Evolving Altruism in AI]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/09/art-wang-01.jpg" alt="" class="wp-image-1101" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/09/JaneW7.jpg" alt="" class="wp-image-1102" width="225" height="256" /></div>



<p>Jane and I discuss the relationship between AI and neuroscience (cognitive science, etc), from her perspective at Deepmind after a career researching natural intelligence. We also talk about her meta-reinforcement learning work that connects deep reinforcement learning with known brain circuitry and processes, and finally we talk about her recent work using evolutionary strategies to develop altruism and cooperation among the agents in a multi-agent reinforcement learning environment.</p>



<p>Related:</p>



<ul><li><a href="http://www.janexwang.com/">Jane’s website</a>.</li><li>Twitter: <a href="http://www.twitter.com/janexwang/">@janexwang</a>. </li><li>The papers we discuss or mention:<ul><li><a href="https://arxiv.org/abs/1611.05763">Learning to reinforcement learn.</a></li><li>Blog post with a link to the paper: <a href="https://deepmind.com/blog/article/prefrontal-cortex-meta-reinforcement-learning-system">Prefrontal cortex as a meta-reinforcement learning system</a>.</li><li><a href="https://arxiv.org/abs/2007.03750">Deep Reinforcement Learning and its Neuroscientific Implications</a></li><li><a href="https://arxiv.org/pdf/1811.05931.pdf">Evolving Intrinsic Motivations for Altruistic Behavior</a>.</li></ul></li><li>Books she recommended:<ul><li><a href="https://www.amazon.com/gp/product/0525558616/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0525558616&amp;linkId=cd8104ff9d8b83093ed20c46eaeed581">Human Compatible: AI and the Problem of Control</a>, by Stuart Russell:</li><li><a href="https://www.amazon.com/gp/product/1250118360/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1250118360&amp;linkId=09cd942e2af91bea8a6c1c30d9a77026">Algorithms to Live By</a>, by Brian Christian and <a href="https://braininspired.co/podcast/56/">Tom Griffiths</a>.</li></ul></li></ul>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img src="https://braininspired.co/wp-content/uploads/2020/09/humancompatible.jpg" alt="" class="wp-image-1103" /></li><li class="blocks-gallery-item"><img src="https://braininspired.co/wp-content/uploads/2019/12/41QotnHdoL._SL250_.jpg" alt="" class="wp-image-935" /></li></ul>



<p>Timeline:</p>



<p>0:00 - Intro<br />3:36 - Skip Intro<br />4:45 - Transition to Deepmind<br />19:56 - Changing perspectives on neuroscience<br />24:49 - Is neuroscience useful for AI?<br />33:11 - Is deep learning hitting a wall?<br />35:57 - Meta-reinforcement learning<br />52:00 - Altruism in multi-agent RL</p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/083-Jane-Wang-public.mp3" length="70633659"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Jane and I discuss the relationship between AI and neuroscience (cognitive science, etc), from her perspective at Deepmind after a career researching natural intelligence. We also talk about her meta-reinforcement learning work that connects deep reinforcement learning with known brain circuitry and processes, and finally we talk about her recent work using evolutionary strategies to develop altruism and cooperation among the agents in a multi-agent reinforcement learning environment.



Related:



Jane’s website.Twitter: @janexwang. The papers we discuss or mention:Learning to reinforcement learn.Blog post with a link to the paper: Prefrontal cortex as a meta-reinforcement learning system.Deep Reinforcement Learning and its Neuroscientific ImplicationsEvolving Intrinsic Motivations for Altruistic Behavior.Books she recommended:Human Compatible: AI and the Problem of Control, by Stuart Russell:Algorithms to Live By, by Brian Christian and Tom Griffiths.







Timeline:



0:00 - Intro3:36 - Skip Intro4:45 - Transition to Deepmind19:56 - Changing perspectives on neuroscience24:49 - Is neuroscience useful for AI?33:11 - Is deep learning hitting a wall?35:57 - Meta-reinforcement learning52:00 - Altruism in multi-agent RL
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-wang-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:13:16</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 082 Steve Grossberg: Adaptive Resonance Theory]]>
                </title>
                <pubDate>Wed, 26 Aug 2020 07:22:20 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-082-steve-grossberg-adaptive-resonance-theory</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-082-steve-grossberg-adaptive-resonance-theory</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/08/art-grossberg-01.jpg" alt="" class="wp-image-1098" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/download.jpeg" alt="" class="wp-image-1099" /></div>



<p>Steve and I discuss his long and productive career as a theoretical neuroscientist. We cover his tried and true method of taking a large body of psychological behavioral findings, determining how they fit together and what’s paradoxical about them, developing design principles, theories, and models from that body of data, and using experimental neuroscience to inform and confirm his model predictions. We talk about his Adaptive Resonance Theory (ART) to describe how our brains are self-organizing, adaptive, and deal with changing environments. We also talk about his complementary computing paradigm to describe how two systems can complement each other to create emergent properties neither system can create on its own , how the resonant states in ART support consciousness, his place in the history of both neuroscience and AI, and quite a bit more.</p>



<p>Related:</p>



<ul><li>Steve's <a href="https://sites.bu.edu/steveg/">BU website</a>.</li><li>Some papers we discuss or mention (much more on his website):<ul><li><a href="https://sites.bu.edu/steveg/files/2016/06/ART.pdf">Adaptive Resonance Theory: How a brain learns to consciously attend, learn, and recognize a changing world</a>.</li><li><a href="http://www.sciencedirect.com/science/article/pii/S0893608016301800">Towards solving the Hard Problem of Consciousness: The varieties of brain resonances and the conscious experiences that they support</a>.</li><li><a href="https://doi.org/10.3389/fnbot.2020.00036%20">A Path Toward Explainable AI and Autonomous Adaptive Intelligence: Deep Learning, Adaptive Resonance, and Models of Perception, Emotion, and Action</a>.</li></ul></li></ul>



<p>Topics Time stamps:</p>



<p>0:00 - Intro<br />5:48 - Skip Intro<br />9:42 - Beginnings<br />18:40 - Modeling method<br />44:05 - Physics vs. neuroscience<br />54:50 - Historical credit for Hopfield network<br />1:03:40 - Steve's upcoming book<br />1:08:24 - Being shy<br />1:11:21 - Stability plasticity dilemma<br />1:14:10 - Adaptive resonance theory<br />1:18:25 - ART matching rule<br />1:21:35 - Consciousness as resonance<br />1:29:15 - Complementary computing<br />1:38:58 - Vigilance to re-orient<br />1:54:58 - Deep learning vs. ART</p>



<p></p>



<p></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Steve and I discuss his long and productive career as a theoretical neuroscientist. We cover his tried and true method of taking a large body of psychological behavioral findings, determining how they fit together and what’s paradoxical about them, developing design principles, theories, and models from that body of data, and using experimental neuroscience to inform and confirm his model predictions. We talk about his Adaptive Resonance Theory (ART) to describe how our brains are self-organizing, adaptive, and deal with changing environments. We also talk about his complementary computing paradigm to describe how two systems can complement each other to create emergent properties neither system can create on its own , how the resonant states in ART support consciousness, his place in the history of both neuroscience and AI, and quite a bit more.



Related:



Steve's BU website.Some papers we discuss or mention (much more on his website):Adaptive Resonance Theory: How a brain learns to consciously attend, learn, and recognize a changing world.Towards solving the Hard Problem of Consciousness: The varieties of brain resonances and the conscious experiences that they support.A Path Toward Explainable AI and Autonomous Adaptive Intelligence: Deep Learning, Adaptive Resonance, and Models of Perception, Emotion, and Action.



Topics Time stamps:



0:00 - Intro5:48 - Skip Intro9:42 - Beginnings18:40 - Modeling method44:05 - Physics vs. neuroscience54:50 - Historical credit for Hopfield network1:03:40 - Steve's upcoming book1:08:24 - Being shy1:11:21 - Stability plasticity dilemma1:14:10 - Adaptive resonance theory1:18:25 - ART matching rule1:21:35 - Consciousness as resonance1:29:15 - Complementary computing1:38:58 - Vigilance to re-orient1:54:58 - Deep learning vs. ART








]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 082 Steve Grossberg: Adaptive Resonance Theory]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/08/art-grossberg-01.jpg" alt="" class="wp-image-1098" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/download.jpeg" alt="" class="wp-image-1099" /></div>



<p>Steve and I discuss his long and productive career as a theoretical neuroscientist. We cover his tried and true method of taking a large body of psychological behavioral findings, determining how they fit together and what’s paradoxical about them, developing design principles, theories, and models from that body of data, and using experimental neuroscience to inform and confirm his model predictions. We talk about his Adaptive Resonance Theory (ART) to describe how our brains are self-organizing, adaptive, and deal with changing environments. We also talk about his complementary computing paradigm to describe how two systems can complement each other to create emergent properties neither system can create on its own , how the resonant states in ART support consciousness, his place in the history of both neuroscience and AI, and quite a bit more.</p>



<p>Related:</p>



<ul><li>Steve's <a href="https://sites.bu.edu/steveg/">BU website</a>.</li><li>Some papers we discuss or mention (much more on his website):<ul><li><a href="https://sites.bu.edu/steveg/files/2016/06/ART.pdf">Adaptive Resonance Theory: How a brain learns to consciously attend, learn, and recognize a changing world</a>.</li><li><a href="http://www.sciencedirect.com/science/article/pii/S0893608016301800">Towards solving the Hard Problem of Consciousness: The varieties of brain resonances and the conscious experiences that they support</a>.</li><li><a href="https://doi.org/10.3389/fnbot.2020.00036%20">A Path Toward Explainable AI and Autonomous Adaptive Intelligence: Deep Learning, Adaptive Resonance, and Models of Perception, Emotion, and Action</a>.</li></ul></li></ul>



<p>Topics Time stamps:</p>



<p>0:00 - Intro<br />5:48 - Skip Intro<br />9:42 - Beginnings<br />18:40 - Modeling method<br />44:05 - Physics vs. neuroscience<br />54:50 - Historical credit for Hopfield network<br />1:03:40 - Steve's upcoming book<br />1:08:24 - Being shy<br />1:11:21 - Stability plasticity dilemma<br />1:14:10 - Adaptive resonance theory<br />1:18:25 - ART matching rule<br />1:21:35 - Consciousness as resonance<br />1:29:15 - Complementary computing<br />1:38:58 - Vigilance to re-orient<br />1:54:58 - Deep learning vs. ART</p>



<p></p>



<p></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/082-Steve-Grossberg.mp3" length="130518178"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Steve and I discuss his long and productive career as a theoretical neuroscientist. We cover his tried and true method of taking a large body of psychological behavioral findings, determining how they fit together and what’s paradoxical about them, developing design principles, theories, and models from that body of data, and using experimental neuroscience to inform and confirm his model predictions. We talk about his Adaptive Resonance Theory (ART) to describe how our brains are self-organizing, adaptive, and deal with changing environments. We also talk about his complementary computing paradigm to describe how two systems can complement each other to create emergent properties neither system can create on its own , how the resonant states in ART support consciousness, his place in the history of both neuroscience and AI, and quite a bit more.



Related:



Steve's BU website.Some papers we discuss or mention (much more on his website):Adaptive Resonance Theory: How a brain learns to consciously attend, learn, and recognize a changing world.Towards solving the Hard Problem of Consciousness: The varieties of brain resonances and the conscious experiences that they support.A Path Toward Explainable AI and Autonomous Adaptive Intelligence: Deep Learning, Adaptive Resonance, and Models of Perception, Emotion, and Action.



Topics Time stamps:



0:00 - Intro5:48 - Skip Intro9:42 - Beginnings18:40 - Modeling method44:05 - Physics vs. neuroscience54:50 - Historical credit for Hopfield network1:03:40 - Steve's upcoming book1:08:24 - Being shy1:11:21 - Stability plasticity dilemma1:14:10 - Adaptive resonance theory1:18:25 - ART matching rule1:21:35 - Consciousness as resonance1:29:15 - Complementary computing1:38:58 - Vigilance to re-orient1:54:58 - Deep learning vs. ART








]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-grossberg-01.jpg"></itunes:image>
                                                                            <itunes:duration>02:15:38</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 081 Pieter Roelfsema: Brain-propagation]]>
                </title>
                <pubDate>Sun, 16 Aug 2020 10:12:25 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-081-pieter-roelfsema-brain-propagation</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-081-pieter-roelfsema-brain-propagation</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/08/art-roelfsema-01.jpg" alt="" class="wp-image-1092" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/Roelfsema_3339vk_wl.jpg" alt="" class="wp-image-1093" /></div>



<p>Pieter and I discuss his ongoing quest to figure out how the brain implements learning that solves the credit assignment problem, like backpropagation does for neural networks. We also talk about his work to understand how we perceive individual objects in a crowded scene, his neurophysiological recordings in support of the global neuronal workspace hypothesis of consciousness, and the visual prosthetic device he’s developing to cure blindness by directly stimulating early visual cortex. </p>



<p>Related:</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0465026567/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0465026567&amp;linkId=cf653bb23acd452739667f49ab5a223e" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/08/geb.jpeg" alt="" class="wp-image-1096" /></a></div>



<ul><li>Pieter's <a href="https://nin.nl/research/researchgroups/roelfsema-group/">lab website</a>.</li><li>Twitter: <a href="https://twitter.com/Pieters_Tweet">@Pieters_Tweet</a>.</li><li>His startup to cure blindness: <a href="http://phosphoenix.nl/">Phosphoenix</a>.</li><li>Talk:<ul><li><a href="https://www.youtube.com/watch?v=zNav6js9jFg">Seeing and thinking with your visual brain</a></li></ul></li><li>The papers we discuss or mention:<ul><li><a href="https://doi.org/10.1038/nrn.2018.6">Control of synaptic plasticity in deep cortical networks</a>.</li><li><a href="https://arxiv.org/abs/1811.01768">A Biologically Plausible Learning Rule for Deep Learning in the Brain.</a></li><li><a href="https://doi.org/10.1016/j.neuron.2020.01.026">Conscious Processing and the Global Neuronal Workspace Hypothesis</a>.</li></ul></li><li>Pieter's neuro-origin book inspiration (like so many others): <a href="https://www.amazon.com/gp/product/0465026567/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0465026567&amp;linkId=cf653bb23acd452739667f49ab5a223e" target="_blank" rel="noreferrer noopener">Gödel, Escher, Bach: An Eternal Golden Braid</a> by Douglas Hofstadter.</li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Pieter and I discuss his ongoing quest to figure out how the brain implements learning that solves the credit assignment problem, like backpropagation does for neural networks. We also talk about his work to understand how we perceive individual objects in a crowded scene, his neurophysiological recordings in support of the global neuronal workspace hypothesis of consciousness, and the visual prosthetic device he’s developing to cure blindness by directly stimulating early visual cortex. 



Related:







Pieter's lab website.Twitter: @Pieters_Tweet.His startup to cure blindness: Phosphoenix.Talk:Seeing and thinking with your visual brainThe papers we discuss or mention:Control of synaptic plasticity in deep cortical networks.A Biologically Plausible Learning Rule for Deep Learning in the Brain.Conscious Processing and the Global Neuronal Workspace Hypothesis.Pieter's neuro-origin book inspiration (like so many others): Gödel, Escher, Bach: An Eternal Golden Braid by Douglas Hofstadter.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 081 Pieter Roelfsema: Brain-propagation]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/08/art-roelfsema-01.jpg" alt="" class="wp-image-1092" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/Roelfsema_3339vk_wl.jpg" alt="" class="wp-image-1093" /></div>



<p>Pieter and I discuss his ongoing quest to figure out how the brain implements learning that solves the credit assignment problem, like backpropagation does for neural networks. We also talk about his work to understand how we perceive individual objects in a crowded scene, his neurophysiological recordings in support of the global neuronal workspace hypothesis of consciousness, and the visual prosthetic device he’s developing to cure blindness by directly stimulating early visual cortex. </p>



<p>Related:</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0465026567/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0465026567&amp;linkId=cf653bb23acd452739667f49ab5a223e" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/08/geb.jpeg" alt="" class="wp-image-1096" /></a></div>



<ul><li>Pieter's <a href="https://nin.nl/research/researchgroups/roelfsema-group/">lab website</a>.</li><li>Twitter: <a href="https://twitter.com/Pieters_Tweet">@Pieters_Tweet</a>.</li><li>His startup to cure blindness: <a href="http://phosphoenix.nl/">Phosphoenix</a>.</li><li>Talk:<ul><li><a href="https://www.youtube.com/watch?v=zNav6js9jFg">Seeing and thinking with your visual brain</a></li></ul></li><li>The papers we discuss or mention:<ul><li><a href="https://doi.org/10.1038/nrn.2018.6">Control of synaptic plasticity in deep cortical networks</a>.</li><li><a href="https://arxiv.org/abs/1811.01768">A Biologically Plausible Learning Rule for Deep Learning in the Brain.</a></li><li><a href="https://doi.org/10.1016/j.neuron.2020.01.026">Conscious Processing and the Global Neuronal Workspace Hypothesis</a>.</li></ul></li><li>Pieter's neuro-origin book inspiration (like so many others): <a href="https://www.amazon.com/gp/product/0465026567/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0465026567&amp;linkId=cf653bb23acd452739667f49ab5a223e" target="_blank" rel="noreferrer noopener">Gödel, Escher, Bach: An Eternal Golden Braid</a> by Douglas Hofstadter.</li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/081-Pieter-Roelfsema-public.mp3" length="79103849"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Pieter and I discuss his ongoing quest to figure out how the brain implements learning that solves the credit assignment problem, like backpropagation does for neural networks. We also talk about his work to understand how we perceive individual objects in a crowded scene, his neurophysiological recordings in support of the global neuronal workspace hypothesis of consciousness, and the visual prosthetic device he’s developing to cure blindness by directly stimulating early visual cortex. 



Related:







Pieter's lab website.Twitter: @Pieters_Tweet.His startup to cure blindness: Phosphoenix.Talk:Seeing and thinking with your visual brainThe papers we discuss or mention:Control of synaptic plasticity in deep cortical networks.A Biologically Plausible Learning Rule for Deep Learning in the Brain.Conscious Processing and the Global Neuronal Workspace Hypothesis.Pieter's neuro-origin book inspiration (like so many others): Gödel, Escher, Bach: An Eternal Golden Braid by Douglas Hofstadter.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-roelfsema-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:22:05</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 080 Daeyeol Lee: Birth of Intelligence]]>
                </title>
                <pubDate>Thu, 06 Aug 2020 12:09:22 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-080-daeyeol-lee-birth-of-intelligence</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-080-daeyeol-lee-birth-of-intelligence</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/08/art-lee-01.jpg" alt="" class="wp-image-1087" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/Daeyeol-Lee-285x300-1.jpg" alt="" class="wp-image-1088" /></div>



<p>Daeyeol and I discuss his book <a href="https://www.amazon.com/gp/product/0190908327/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190908327&amp;linkId=84d54747cc78d3a8dfedc8fe91564dc1">Birth of Intelligence: From RNA to Artificial Intelligence</a>, which argues intelligence is a function of and inseparable from life, bound by self-replication and evolution. The book covers a ton of neuroscience related to decision making and learning, though we focused on a few theoretical frameworks and ideas like division of labor and principal-agent relationships to understand how our brains and minds are related to our genes, how AI is related to humans (for now), metacognition, consciousness, and a ton more.</p>



<p>Related:</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/birthbook.jpg" alt="" class="wp-image-1089" width="166" height="250" /></div>



<ul><li><a href="https://sites.krieger.jhu.edu/daeyeol-lee-lab/">Lee Lab for Learning and Decision Making</a>.</li><li>Twitter: <a href="https://twitter.com/daeyeol_lee">@daeyeol_lee</a>.</li><li>Daeyeol’s side passion, <a href="https://soundcloud.com/ungteoriz">creating music</a>.</li><li>His book: <a href="https://www.amazon.com/gp/product/0190908327/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190908327&amp;linkId=84d54747cc78d3a8dfedc8fe91564dc1">Birth of Intelligence: From RNA to Artificial Intelligence</a>.</li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Daeyeol and I discuss his book Birth of Intelligence: From RNA to Artificial Intelligence, which argues intelligence is a function of and inseparable from life, bound by self-replication and evolution. The book covers a ton of neuroscience related to decision making and learning, though we focused on a few theoretical frameworks and ideas like division of labor and principal-agent relationships to understand how our brains and minds are related to our genes, how AI is related to humans (for now), metacognition, consciousness, and a ton more.



Related:







Lee Lab for Learning and Decision Making.Twitter: @daeyeol_lee.Daeyeol’s side passion, creating music.His book: Birth of Intelligence: From RNA to Artificial Intelligence.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 080 Daeyeol Lee: Birth of Intelligence]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/08/art-lee-01.jpg" alt="" class="wp-image-1087" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/Daeyeol-Lee-285x300-1.jpg" alt="" class="wp-image-1088" /></div>



<p>Daeyeol and I discuss his book <a href="https://www.amazon.com/gp/product/0190908327/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190908327&amp;linkId=84d54747cc78d3a8dfedc8fe91564dc1">Birth of Intelligence: From RNA to Artificial Intelligence</a>, which argues intelligence is a function of and inseparable from life, bound by self-replication and evolution. The book covers a ton of neuroscience related to decision making and learning, though we focused on a few theoretical frameworks and ideas like division of labor and principal-agent relationships to understand how our brains and minds are related to our genes, how AI is related to humans (for now), metacognition, consciousness, and a ton more.</p>



<p>Related:</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/08/birthbook.jpg" alt="" class="wp-image-1089" width="166" height="250" /></div>



<ul><li><a href="https://sites.krieger.jhu.edu/daeyeol-lee-lab/">Lee Lab for Learning and Decision Making</a>.</li><li>Twitter: <a href="https://twitter.com/daeyeol_lee">@daeyeol_lee</a>.</li><li>Daeyeol’s side passion, <a href="https://soundcloud.com/ungteoriz">creating music</a>.</li><li>His book: <a href="https://www.amazon.com/gp/product/0190908327/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0190908327&amp;linkId=84d54747cc78d3a8dfedc8fe91564dc1">Birth of Intelligence: From RNA to Artificial Intelligence</a>.</li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/080-Daeyeol-Lee-public.mp3" length="87799430"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Daeyeol and I discuss his book Birth of Intelligence: From RNA to Artificial Intelligence, which argues intelligence is a function of and inseparable from life, bound by self-replication and evolution. The book covers a ton of neuroscience related to decision making and learning, though we focused on a few theoretical frameworks and ideas like division of labor and principal-agent relationships to understand how our brains and minds are related to our genes, how AI is related to humans (for now), metacognition, consciousness, and a ton more.



Related:







Lee Lab for Learning and Decision Making.Twitter: @daeyeol_lee.Daeyeol’s side passion, creating music.His book: Birth of Intelligence: From RNA to Artificial Intelligence.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-lee-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:31:09</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 079 Romain Brette: The Coding Brain Metaphor]]>
                </title>
                <pubDate>Mon, 27 Jul 2020 10:14:26 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-079-romain-brette-the-coding-brain-metaphor</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-079-romain-brette-the-coding-brain-metaphor</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/07/art-brette-01.jpg" alt="" class="wp-image-1083" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/romain.jpg" alt="" class="wp-image-1084" /></div>



<p>Romain and I discuss his theoretical/philosophical work examining how neuroscientists rampantly misuse the word "code" when making claims about information processing in brains. We talk about the coding metaphor, various notions of information, the different roles and facets of mental representation, perceptual invariance, subjective physics, process versus substance metaphysics, and the experience of writing a Behavioral and Brain Sciences article (spoiler: it's a demanding yet rewarding experience).</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/04/gibson.jpeg" alt="" class="wp-image-981" /></div>



<ul><li><a href="http://romainbrette.fr/">Romain's website</a>.</li><li>Twitter: <a href="https://twitter.com/RomainBrette">@RomainBrette</a>.</li><li>The papers we discuss or mention:<ul><li><a href="http://journal.frontiersin.org/article/10.3389/fnsys.2015.00151/abstract">Philosophy of the spike: rate-based vs. spike-based theories of the brain.</a></li><li><a href="https://www.cambridge.org/core/journals/behavioral-and-brain-sciences/article/is-coding-a-relevant-metaphor-for-the-brain/D578626E4888193FFFAE5B6E2C37E052">Is coding a relevant metaphor for the brain?</a> (<a href="https://www.biorxiv.org/content/early/2018/07/13/168237">bioRxiv link</a>).</li><li><a href="https://arxiv.org/abs/1311.3129">Subjective physics</a>.</li></ul></li><li>Related works<ul><li><a href="https://www.amazon.com/gp/product/1848725787/ref=as_li_tl?ie=UTF8&amp;camp=1789&amp;creative=9325&amp;creativeASIN=1848725787&amp;linkCode=as2&amp;tag=pmiddlebroo09-20&amp;linkId=656f795b7accc770360ecdb9b2b9641c" target="_blank" rel="noreferrer noopener">The Ecological Approach to Visual Perception</a> by James Gibson.</li><li><a href="http://nivea.psycho.univ-paris5.fr/BookWebPage/index.html" target="_blank" rel="noreferrer noopener">Why Red Doesn't Sound Like a Bell</a> by Kevin O’Regan.</li></ul></li></ul>



<p></p>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Romain and I discuss his theoretical/philosophical work examining how neuroscientists rampantly misuse the word "code" when making claims about information processing in brains. We talk about the coding metaphor, various notions of information, the different roles and facets of mental representation, perceptual invariance, subjective physics, process versus substance metaphysics, and the experience of writing a Behavioral and Brain Sciences article (spoiler: it's a demanding yet rewarding experience).







Romain's website.Twitter: @RomainBrette.The papers we discuss or mention:Philosophy of the spike: rate-based vs. spike-based theories of the brain.Is coding a relevant metaphor for the brain? (bioRxiv link).Subjective physics.Related worksThe Ecological Approach to Visual Perception by James Gibson.Why Red Doesn't Sound Like a Bell by Kevin O’Regan.




]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 079 Romain Brette: The Coding Brain Metaphor]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/07/art-brette-01.jpg" alt="" class="wp-image-1083" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/romain.jpg" alt="" class="wp-image-1084" /></div>



<p>Romain and I discuss his theoretical/philosophical work examining how neuroscientists rampantly misuse the word "code" when making claims about information processing in brains. We talk about the coding metaphor, various notions of information, the different roles and facets of mental representation, perceptual invariance, subjective physics, process versus substance metaphysics, and the experience of writing a Behavioral and Brain Sciences article (spoiler: it's a demanding yet rewarding experience).</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/04/gibson.jpeg" alt="" class="wp-image-981" /></div>



<ul><li><a href="http://romainbrette.fr/">Romain's website</a>.</li><li>Twitter: <a href="https://twitter.com/RomainBrette">@RomainBrette</a>.</li><li>The papers we discuss or mention:<ul><li><a href="http://journal.frontiersin.org/article/10.3389/fnsys.2015.00151/abstract">Philosophy of the spike: rate-based vs. spike-based theories of the brain.</a></li><li><a href="https://www.cambridge.org/core/journals/behavioral-and-brain-sciences/article/is-coding-a-relevant-metaphor-for-the-brain/D578626E4888193FFFAE5B6E2C37E052">Is coding a relevant metaphor for the brain?</a> (<a href="https://www.biorxiv.org/content/early/2018/07/13/168237">bioRxiv link</a>).</li><li><a href="https://arxiv.org/abs/1311.3129">Subjective physics</a>.</li></ul></li><li>Related works<ul><li><a href="https://www.amazon.com/gp/product/1848725787/ref=as_li_tl?ie=UTF8&amp;camp=1789&amp;creative=9325&amp;creativeASIN=1848725787&amp;linkCode=as2&amp;tag=pmiddlebroo09-20&amp;linkId=656f795b7accc770360ecdb9b2b9641c" target="_blank" rel="noreferrer noopener">The Ecological Approach to Visual Perception</a> by James Gibson.</li><li><a href="http://nivea.psycho.univ-paris5.fr/BookWebPage/index.html" target="_blank" rel="noreferrer noopener">Why Red Doesn't Sound Like a Bell</a> by Kevin O’Regan.</li></ul></li></ul>



<p></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/079-Romain-Brette-Public.mp3" length="76211915"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Romain and I discuss his theoretical/philosophical work examining how neuroscientists rampantly misuse the word "code" when making claims about information processing in brains. We talk about the coding metaphor, various notions of information, the different roles and facets of mental representation, perceptual invariance, subjective physics, process versus substance metaphysics, and the experience of writing a Behavioral and Brain Sciences article (spoiler: it's a demanding yet rewarding experience).







Romain's website.Twitter: @RomainBrette.The papers we discuss or mention:Philosophy of the spike: rate-based vs. spike-based theories of the brain.Is coding a relevant metaphor for the brain? (bioRxiv link).Subjective physics.Related worksThe Ecological Approach to Visual Perception by James Gibson.Why Red Doesn't Sound Like a Bell by Kevin O’Regan.




]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-brette-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:19:04</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 078 David and John Krakauer: Part 2]]>
                </title>
                <pubDate>Fri, 17 Jul 2020 14:36:18 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-078-david-and-john-krakauer-part-2</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-078-david-and-john-krakauer-part-2</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/07/art-krakauer-2-01.jpg" alt="" class="wp-image-1074" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2019/01/JohnKrakauerMaster_hires-1-370x370-bw.jpg" alt="" class="wp-image-635" width="200" height="200" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/davidkrakauer.jpg" alt="" class="wp-image-1069" width="244" height="167" /></div>



<p>In this second part of our conversation David, John, and I continue to discuss the role of complexity science in the study of intelligence, brains, and minds. We also get into functionalism and multiple realizability, dynamical systems explanations, the role of time in thinking, and more. Be sure to listen to <a href="https://braininspired.co/podcast/77/">the first part</a>, which lays the foundation for what we discuss in this episode.</p>



<p>Notes:</p>



<ul><li><a href="https://www.santafe.edu/people/profile/david-krakauer">David’s page</a> at the Santa Fe Institute.</li><li>John’s <a href="http://blam-lab.org/">BLAM lab</a> website.</li><li>Follow SFI on twitter: <a href="https://twitter.com/sfiscience">@sfiscience</a>.</li><li>BLAM on Twitter: <a href="https://twitter.com/blamlab?lang=en">@blamlab</a> </li><li>Related Krakauer stuff:<ul><li><a href="https://aeon.co/essays/will-brains-or-algorithms-rule-the-kingdom-of-science">At the limits of thought</a>. An Aeon article by David</li><li><a href="https://youtu.be/HnA91mymghA">Complex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks</a>. A video conversation with both John and David.</li><li><a href="https://www.santafe.edu/culture/podcast">Complexity Podcast</a>.</li></ul></li><li>Books mentioned:<ul><li><a href="https://www.amazon.com/gp/product/B07ZDJPLD4/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B07ZDJPLD4&amp;linkId=16519a2009243928d09873a9ff1d7222" target="_blank" rel="noreferrer noopener">Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute</a>, ed. 
David Krakauer.</li><li><a href="https://www.amazon.com/gp/product/0197510264/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0197510264&amp;linkId=39d73128189707346c55b6f16d794aad">Understanding Scientific Understanding</a> by Henk de Regt.</li><li><a href="https://www.amazon.com/gp/product/1541646851/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541646851&amp;linkId=d031c5c1f00485f7d203574889d951aa">The Idea of the Brain</a> by Matthew Cobb.</li><li><a href="https://www.amazon.com/gp/product/1786635488/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1786635488&amp;linkId=1f88679448334aa2e28cd26d7138bfac" target="_blank" rel="noreferrer noopener">New Dark Age: Technology and the End of the Future</a> by James Bridle.</li><li><a href="https://www.amazon.com/gp/product/0804171009/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0804171009&amp;linkId=9751c6a6e6311ca1b62b3ac2c60ac9e3" target="_blank" rel="noreferrer noopener">The River of Consciousness</a> by Oliver Sacks.</li></ul></li></ul>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="330" height="500" src="https://braininspired.co/wp-content/uploads/2020/07/31YCRAuHKIL.jpg" alt="" class="wp-image-1081" /></li><li class="blocks-gallery-item"></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
















In this second part of our conversation David, John, and I continue to discuss the role of complexity science in the study of intelligence, brains, and minds. We also get into functionalism and multiple realizability, dynamical systems explanations, the role of time in thinking, and more. Be sure to listen to the first part, which lays the foundation for what we discuss in this episode.



Notes:



David’s page at the Santa Fe Institute.John’s BLAM lab website.Follow SFI on twitter: @sfiscience.BLAM on Twitter: @blamlab Related Krakauer stuff:At the limits of thought. An Aeon article by DavidComplex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks. A video conversation with both John and David.Complexity Podcast.Books mentioned:Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute, ed. David Krakauer.Understanding Scientific Understanding by Henk de Regt.The Idea of the Brain by Matthew Cobb.New Dark Age: Technology and the End of the Future by James Bridle.The River of Consciousness by Oliver Sacks.



]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 078 David and John Krakauer: Part 2]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/07/art-krakauer-2-01.jpg" alt="" class="wp-image-1074" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2019/01/JohnKrakauerMaster_hires-1-370x370-bw.jpg" alt="" class="wp-image-635" width="200" height="200" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/davidkrakauer.jpg" alt="" class="wp-image-1069" width="244" height="167" /></div>



<p>In this second part of our conversation David, John, and I continue to discuss the role of complexity science in the study of intelligence, brains, and minds. We also get into functionalism and multiple realizability, dynamical systems explanations, the role of time in thinking, and more. Be sure to listen to <a href="https://braininspired.co/podcast/77/">the first part</a>, which lays the foundation for what we discuss in this episode.</p>



<p>Notes:</p>



<ul><li><a href="https://www.santafe.edu/people/profile/david-krakauer">David’s page</a> at the Santa Fe Institute.</li><li>John’s <a href="http://blam-lab.org/">BLAM lab</a> website.</li><li>Follow SFI on twitter: <a href="https://twitter.com/sfiscience">@sfiscience</a>.</li><li>BLAM on Twitter: <a href="https://twitter.com/blamlab?lang=en">@blamlab</a> </li><li>Related Krakauer stuff:<ul><li><a href="https://aeon.co/essays/will-brains-or-algorithms-rule-the-kingdom-of-science">At the limits of thought</a>. An Aeon article by David</li><li><a href="https://youtu.be/HnA91mymghA">Complex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks</a>. A video conversation with both John and David.</li><li><a href="https://www.santafe.edu/culture/podcast">Complexity Podcast</a>.</li></ul></li><li>Books mentioned:<ul><li><a href="https://www.amazon.com/gp/product/B07ZDJPLD4/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B07ZDJPLD4&amp;linkId=16519a2009243928d09873a9ff1d7222" target="_blank" rel="noreferrer noopener">Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute</a>, ed. 
David Krakauer.</li><li><a href="https://www.amazon.com/gp/product/0197510264/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0197510264&amp;linkId=39d73128189707346c55b6f16d794aad">Understanding Scientific Understanding</a> by Henk de Regt.</li><li><a href="https://www.amazon.com/gp/product/1541646851/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541646851&amp;linkId=d031c5c1f00485f7d203574889d951aa">The Idea of the Brain</a> by Matthew Cobb.</li><li><a href="https://www.amazon.com/gp/product/1786635488/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1786635488&amp;linkId=1f88679448334aa2e28cd26d7138bfac" target="_blank" rel="noreferrer noopener">New Dark Age: Technology and the End of the Future</a> by James Bridle.</li><li><a href="https://www.amazon.com/gp/product/0804171009/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0804171009&amp;linkId=9751c6a6e6311ca1b62b3ac2c60ac9e3" target="_blank" rel="noreferrer noopener">The River of Consciousness</a> by Oliver Sacks.</li></ul></li></ul>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="330" height="500" src="https://braininspired.co/wp-content/uploads/2020/07/31YCRAuHKIL.jpg" alt="" class="wp-image-1081" /></li><li class="blocks-gallery-item"><img width="333" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/de-regt.jpg" alt="" class="wp-image-1070" /></li><li class="blocks-gallery-item"><img width="224" height="346" src="https://braininspired.co/wp-content/uploads/2020/07/cobb.jpg" alt="" class="wp-image-1071" /></li><li class="blocks-gallery-item"><img width="326" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/newdarkage.jpg" alt="" class="wp-image-1076" /></li><li class="blocks-gallery-item"><img width="324" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/riversacks.jpg" alt="" class="wp-image-1077" /></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>David    00:00:01    Is there something special about brain mind, like phenomenon, that are completely different from the history of scientific logical discovery? So they’ll always be outside of our reach, and I can’t understand where that belief would come from.  </p>



<p>John    00:00:17    I don’t think that out of our reach at all, I’m just saying that the coarse grained objects we use to describe mine phenomena will not be, will not feel the same way that people who like to look at eye movements and stretch reflex, and even the cerebellum where they feel like they can couch their understanding of their behavioral output in terms of their circuitry. And all I’m saying is if that’s what you want, you’re not going to get it  </p>



<p>David    00:00:43    There on the table. Here. Three positions, at least one is that’s cool. It unfairly that sort of microscopic reductionist who says it has to be as low as you can go, which  </p>



<p>Paul    00:00:56    Is what John thinks I am.  </p>



<p>David    00:00:59    That person in the end just has the total physics envy and wants to do quantum mechanics.  </p>



<p>Speaker 4    00:01:12    This is brain inspired.  </p>



<p>Paul    00:01:25    Welcome everyone to the second part of my conversation with David and John Krakauer. I’m Paul Middlebrooks. This second part picks up right where we left off in the first part. And I highly recommend you listen to that first part to best absorb this second part. In this episode, we talk more specifically about brains and minds, how complexity thinking can help, uh, what it might look like to attain a satisfying understanding of various mental processes and how, or whether that understanding will include any account of brain processes. Uh, you’ll hear my own inability to communicate what would serve as a satisfying account of the relation between brain and mind, but thinking clearly about these things is its own goal. Uh, for me, that’s maybe the main goal and I’m going to keep pushing forward until hopefully I’ll get there. Speaking with David and John is a wonderful exercise toward that goal and the mode of thinking they execute, uh, makes it feel like we’re headed in the right direction. Makes me optimistic. I’ll get my own thinking to a satisfying place. We’ll all get there. Won’t we enjoy,  </p>



<p>John    00:02:45    You know, it’s interesting. I don’t know, Paul, if you’ve read the new history of neuroscience, Matthew Cobb’s book, The Idea of the Brain.  </p>



<p>Paul    00:02:52    No, but isn’t it just a list of metaphors? I I’ve not read it, so yeah,  </p>



<p>John    00:02:57    Well, no. I think actually as a scaffold for thinking, and it’s very good. I love the, the history part and the early present. Um, I think once it gets into current neuroscience and prediction of the future, it gets more impoverished, but I don’t know whether that’s Matthew Cobb or whether the field itself is sort of asked them to, but it is a good book. I really do recommend it. It’s got lots of delicious, rich stuff in it, and he’s done a good job. It’s not easy to synthesize all that material, but I tell you, what’s fascinating about it is it, he has a section at the end of the book, um, where he talks about the future. And it’s very interesting that he begins by talking about emergence, but then drops it like a bad smell, right? It’s like, well, he, I think he said something like emergence is the unsatisfactory explanation before we get to the real explanation, right?  </p>



<p>John    00:03:57    And then he moves on to where he feels like the real progress we made is let’s get back down to the circuits and the neurons themselves. Let’s study cognition in a fly where we have the sort of Sherringtonian connectivity map. And then we’ll do some sort of extrapolation cognition in humans. In other words, you see this tension in the field between not really wanting to talk about coarse graining and psychological terms and derive measures and saying, surely we can avoid that awful fate for our field by going into a fly or a worm where we can have the same level of connectivity, detail, and intuition as we did for the stretch reflex. But now we can apply that understanding to something that we call cognition and then somehow extrapolate from that higher up the neuraxis. In other words, you see that there’s this tension that just won’t go away and it’s like, David, it would be silly to do Navier Stokes worrying about the details,  </p>



<p>Paul    00:05:05    But mind is a, um, historically fundamentally mysterious thing because it’s there. Okay. So let me see if I can articulate my own internal struggle with this sort of mapping and you know what I want, I want some sort of, it can be course level, but I just want a way to think about the mapping between them. I don’t need it doesn’t need to come from the circuit level, but it does need to connect them. And one of the things I was going to ask you both about is whether complexity, uh, holds promise for a connecting between these levels. Or if it, if complexity, like you just mentioned is, is assigned the liberation of levels and for us to somehow be happy with understanding things at different levels without the,  </p>



<p>David    00:05:58    No, I think Bo no, I, no, I don’t think we should be happier. And John and I talk about this a lot. I think that, um, and that gets to these two kinds of emergence camps. There’s one group probably I’m in, that’s very interested in how you connect them. And there’s a cat that John made the image says what’s the best one to use in any given level. And I think that’s both necessary. So I mean, the, for us, you know, you know, the gold standard was the derivation of, you know, the ideal gas laws in thermodynamics from statistical mechanics. So that once you’ve got that equation, you don’t have to worry about the individual particles because it has that right. Property of sufficiency, but you didn’t know why. And it was, it was useful to know what I mean, some of us want to know about that, but the origin of levels, that’s sort of what I work on. And I think that that both are necessary. So I wouldn’t forfeit, um, one for the other, I hope we do get into brain mind because I have my own totally quirky ideas that I like to air. And, um, and I’ve never understood. I don’t know if now is appropriate, but John wants to jump in, but I would like to say a couple of things about that.  </p>



<p>Paul    00:07:12    I just got, I mean, we’re, we’re going on our own course. There’s there’s no.  </p>



<p>David    00:07:17    Okay. So I do want to talk about this cause I’ve listening to people I’m always amazed that they don’t do the empirical thing, which is to look for prequels or precedents. And I want to mention two, and I think I don’t have an answer all, but I just want to point out at insight, you mean historical yes. Or, or parallel in other fields, things that look like it. Okay. Okay. And, and I sort of want to be Peircean and argue for a triadic perspective. And I, and it has come to the rescue of two other areas which suffered from the same problem. The first one, the one I know best because I work on the evolution of intelligence is evolution and it was how do you relate physical matter? Uh, and the structure physical matter, we’d put an adaptation to fitness. And for the longest time God was invoked. In other words, it was impossible. I mean, there’s no way to explain the structure of matter, uh, in its relation to function other than invoking an omniscient, omnipotent being okay. And Darwin came to the rescue by introducing a third principle, which was natural selection and natural selection mediated the interaction between physical matter and fitness or replication or success. Okay.  </p>



<p>David    00:08:38    Hardware and software. Okay. Hardware physical matter, right. Just functional things in the world, adds numbers together allows you to type, it allows us to have this conversation online and what mediates them is the algorithm algorithms configure or the operating system configure physical matter to allow them to be functional. Now, David Marr sort of was kind of getting there when he recognized the three levels, right? The sort of physical level, the functional level and the algorithmic level. But he didn’t talk about it in the Peircean terms of thirdness as the resolving level. And you’ll see that they have two things in common, right? So that third party means of configuring the matter to achieve the function. Right? So natural selection is not present in the organism. It’s present in the environment. The programmer is not present in the human machine, but in the environment. And so I just want to say, I don’t think mind emerges from brain mind emergently is engineered by an environment. And that’s the thing that I’ve always found missing in the mind brain is the third part, which is, I think it’s pointless to talk about mind without talking about environment in the same way that in evolution, you couldn’t talk about adaptation and fitness without talking about selection. And I find that quite promising as an avenue. I don’t know how it would play out.  </p>



<p>Paul    00:10:07    Is this like a Wittgensteinian, uh, you must have someone to speak to else you can’t fake  </p>



<p>David    00:10:13    Somewhat. I should. It’s interesting. I feel private language issue. It is somewhat related. Here’s was much more along the lines of the scandal of induction, right? You just, I don’t know what I’m pointing at. Um, but I do think it’s social, but not social in the sense of human to human, but the ecological sense of social to environment. And, uh, I don’t know what John thinks about that, or you think about that, but that’s the bit that I’ve seen missing from a lot of the philosophy of mind,  </p>



<p>John    00:10:42    It’s such a huge area, right. You know, cultural production of mind, embodiment environment. Um, I, I definitely agree that the computation might be distributed far more than you think, but I do. I do feel like we have to worry about the brain fundamentally when it comes to the most impressive cognitive feeds that we see, for example, prospective memory that has been where you can make a cup of coffee with interruptions, you know, where you are on the sequence, you know, the sequence you have to go through to get an envelope and a stamp and then put, and then put the letter in the envelope and then you could stand with the envelope and then you walk from the post office and you put it in the postbox. I know this sounds very old fashioned, but these abilities are, are very much associated with the prefrontal cortex.  </p>



<p>John    00:11:39    You know, Steve Wise and Richard Passingham have spoken about the fact that the, a granular prefrontal cortex only exists in primates, right? And they talk about one shot learning and prospective and all the kinds of cognitive operations and the ability to model the world in a very elaborate way that you could see in primates. So in other words, even if it’s true that there are all these, I mean, Darwin in the Matthew Cobb book, uh, Matthew Cobb says about Darwin that he, he wasn’t really interested in how the brain and the mind connected. He actually admitted that, but he wants you to know how you could have gotten there gradually right through, uh, evolution, right. Darwin actually deliberate, explicitly tabled, how you got mind from brain, but he just said, you have to get it from there because he really shouldn’t have to operate on physical stuff. Right. So, but, so in other words, I would say all this embodiment stuff and all this it’s  </p>



<p>David    00:12:35    Different.  </p>



<p>John    00:12:35    I’m just saying it doesn’t preclude the fact that if we’re going to really understand things like cognition, as we define it, we’re going to have to understand the prefrontal cortex.  </p>



<p>David    00:12:48    Well, let me just, it’s a very interesting example of that. Just to draw your mole example to your brain example. So for you, it was sufficient. You didn’t want to talk about the genetics or the musculature of the mole limb. Um, you just want you to talk about selection pressures. Whereas developmental geneticists would say to, you know, you have to talk about the development of those limbs and, and, and the conserve structures in these, you know, regulatory kernels, and you’re doing the same for the brain. And I think you’re right, the both are required.  </p>



<p>John    00:13:21    And he just to be clear, that’s where the analogy, I would say that the prefrontal cortex, what it does, depending on how you coarse grain, it is like the claws, the fur, and the snout on the mole, right? That if, and there are theories that have been given as to why one shot learning and other such things had to be done on sparse savannas, where you just wouldn’t have a chance to learn slowly associated learning would have just you die, right? So you have to come up with a way to quickly learn and flexibly make choices. And so I just think in the end, we’re going to have to describe that behavior, have a computational theory of that behavior. And then just get confirmation. I think through correlation with properties of the prefrontal cortex, we’re never going to look at all those millions of connections in prefrontal cortex and go, ah, one shot learning prospective memory. You’re just not going to derive it anywhere. That more than from a deep neural net, you’re going to work out what it’s doing, but I think it will have some confirmatory role about your algorithmic explanation for how you do those cognitive operations.  </p>



<p>David    00:14:32    I, I think I agree with all of that. I’m just saying that, um, again, just by analogy that the dualities of matter and fitness and hardware and software actually have the same quantities, it’s like, well, how does software work exactly, um, uh, resolve by introducing the third element, the environment. And it’s hard for me to imagine a concept of mind without using, and it doesn’t, it’s not about so much embodiment or, but without using environmental social concepts, that’s the sentence in which I’m saying that you need to introduce this third element to bridge the two.  </p>



<p>Paul    00:15:21    So it’s not environment as constraint and as an organizing principle,  </p>



<p>David    00:15:26    No, it’s simply that it’s actually as a selection principle. I mean, it’s the way in which you mind, in some sense program’s brain, right? It’s sort of you, you, the, there has to be something that mediates causally.  </p>



<p>John    00:15:45    That just seems to me very similar to the Lily crap and corroding view that we just want to know what was operating on the network to get it to its final consolidated performance. It doesn’t say I don’t see how, what you’re saying is going to get us to say, I understand how perspective memory works. I, I understand how these, you know, ability to task switch work.  </p>



<p>David    00:16:15    No, it doesn’t, it doesn’t help without a chore. I agree with that. It’s a different point. It’s just, I don’t think the word, how does mind emerged and brain is, is that is complete. That’s all I’m saying. It’s just not a meaningful sentence to me without the third element, but you’re right.  </p>



<p>Paul    00:16:31    Yeah. Do you think that you would have detractors of that view? It’s, it’s almost hard to logical, right? Because you, you have to have environment.  </p>



<p>David    00:16:40    Well, it’s odd that people say it so often, isn’t it? I agree. I mean, it’s hard to imagine that there would be, but people use it all the time. How does mind emergent brain? And all I’m saying is I don’t find that a meaningful,  </p>



<p>John    00:16:53    Well, they never, they do. I mean, I think that David, I think I don’t, I mean, they want there to be just like, how do you get stretch, stretch, reflex behavior from a circuit. In other words, you see a neurologist, bam, on someone’s tendon and the arm moves or the knee moves and they want to go, how did that movement arise from spinal tissue? And people will then say, well, there are these neurons, which connect. And I think what they mean is that they want, how does the organization of parts through their interactions lead to a behavior? And in this case, they want the parts to be neurons and their configuration to be their connectivity into a circuit. And they want that configuration of connectivity through the parts to lead to the behavior. And they want to have an explanation like that all the way up to what the prefrontal cortex might do.  </p>



<p>John    00:17:53    I’ve offered, uh, a compromise by saying that if you think about it as trajectory through state spaces, derived from millions of neurons, through some sort of dimensionality reduction that you can visualize like a Feynman diagram, that you can have a functional flavored explanation that uses words and uses a NeuroLeadership object. And that is about as good as it’s ever going to get. If you want to do it in terms of connectivity. And you could argue that the neo-Sherringtonian project of people like Olaf Sporns is really to use connectivity metrics between macroscopic areas. The way that Sherrington talked about neurons, connecting in a reflex arc is just not going to work in my view.  </p>



<p>David    00:18:39    No. And again, so everything you just said, I agree with, I think I’m addressing a slightly different question. Um, so no one says, how does software merge from hardware?  </p>



<p>Paul    00:18:49    No one says that, but if you, um, if you had hardware and you shocked a particular part of it, and the software told you that it just had an autobody experience or a part of it and the software man, I movement and said, I intended to make that eye movement because it experienced an eye movement or a phosphene or something that we consider mind process. Where does the,  </p>



<p>David    00:19:14    But that’s, but that’s exactly the point, Paul, that you’re right. That’s exactly what would happen. Right? If you perturb the hardware, you perturbed the software. It’s exactly right. Um, but we don’t use that language that software’s emergent from hardware because we know how we make software and we know how we make hardware and we know how programming works. And that’s, I guess what I’m saying that, um, that language doesn’t feel correct because there’s a missing third element. And the question, I guess I’m asking is that the causal efficacy of the environment in mediating mind brain, uh, would it lead to a similar change of language? It wouldn’t feel right to say, does it emerge from mechanism even though John’s narrative just now sounds totally reasonable at this point in time. I mean,  </p>



<p>Paul    00:20:00    And I agree with that, but then we’re also living in the age of the brain, computer meadow. This is the most recent metaphor. I  </p>



<p>David    00:20:06    Don’t think it would have made it for people to have said this to me before.  </p>



<p>John    00:20:09    So I disagree with it entirely.  </p>



<p>David    00:20:11    I don’t think that’s the right point. I think that what it’s interesting, by the way, I guess an interest of mine, what computers do. That’s so interesting. I think, and that’s interesting, partly because we built them is that they show how physical matter can give rise to properties like chili, ology, agency and function. And it’s the first one of the first significant devices in the history of human beings. It has those properties. And so I don’t mind if the steam engine was an earlier metaphor for some element of agency, right? It’s just that the computer has it in spades. And so it’s a useful one. I don’t think coding something, a metaphor discredits it because the computer does possess so many of these properties we care about,  </p>



<p>Paul    00:21:04    But you’re mapping it onto hardware and software. And I don’t know that that is the correct  </p>



<p>John    00:21:09    That may, that may be true. A lot of people have said that that’s incorrect and so much that they’re mixed inextricably in a biological tissue  </p>



<p>David    00:21:18    As they are in the machine as they are in a computer.  </p>



<p>John    00:21:20    Right. But, but, but, but I also get irritated when people conflate computational with computer. In other words, of course. And I agree with Gary Marcus, who makes very strong statements. I mean about this, which is cognition is computation over representations. And I just don’t know. Now you can be in the camp of extreme embodiment or like Paul Cisek who’s in denial about cognition and just tries to find data, you know, and, and all those people who just want to somehow chuck it away and deny it and turn it into sort of some sensory motor affordance. I mean, like, I think you had, um, is it Michael Rescorla on your show,  </p>



<p>Paul    00:22:03    Michael Rescorla, that  </p>



<p>John    00:22:04    Very smart guy. And I agree with him that any attempt to do away with representation is an utter failure, right? And so once you accept that you have to represent things and we can have a discussion about what that means. David and I have talked about that a lot. And you just say that you compute a representation that allows you take symbols and you operate on those symbols and change them  </p>



<p>Paul    00:22:26    That have semantic content,  </p>



<p>John    00:22:29    You know, and the numbers of semantic numerals are syntactic. Right. And I just don’t know how else you can think about it. Right. You operate over representations and you transform them. Okay.  </p>



<p>David    00:22:43    It’s interesting to point out just to both of you, that these terms, you’re using representations that are transformed. Okay. Just come out of logic and, um, which is basically what we’re talking about. We talked about computers, don’t get carried away with a particular hardware device that we’re taping on. Right. Uh, what a computational device, at least in the cheering sense has to do with, right. Is, do you have sufficient input, right? Do you have the appropriate sequence of transformations of physical matter to read, arrive at an answer that’s correct in finite time. And that’s true if I’m reaching for an orange, if I’m, you know, there’s a much more general concept of what we mean by computation, it shouldn’t be confused with the particular implement that we happen to be operating on today.  </p>



<p>John    00:23:32    I also think there’s something that I struggle with, but I think is fundamental. This is there’s probably an ontological reason. I mean, the other nice thing about the Matthew Cobb book is he just shows you that very thoughtful people going back to the Greeks and onwards, but worried about the mind brain divide, they were never worried about the, the equivalent divides in their legs or their arms. Right. In other words, there was always a sense that there was something that had to be law, Adrian versus Kenna’s Creek. They had this debate. So there’s the, the sophistication of the discussion of the difference has not increased. I think the only real insight is that algorithms are by definition, substrate independent. Okay. That’s, that’s what an algorithm is. It’s a series of steps that abstract away from how they’re physically instantiated: an abacus, the calculator, your fingers. Right. Right.  </p>



<p>David    00:24:33    But notice again, it’s interesting that the one concrete, tangible example we have of the interface between the logical seemingly immaterial and the material is that one, I think John’s absolutely right. So when you talk about algorithms, they have precisely that property that we’re trying to pursue. It’s not that hardware is brain software mind, not at all. It’s just that they give us a vocabulary and a set of fairly well understood real physical devices that have some of the properties that we’re pursuing.  </p>



<p>John    00:25:12    Um, and the interesting thing is, is that the more mind, like the words you use, the more mind phenomena you care about, Paul, the more substrate, independent algorithmic you can sound. Right. In other words, it’s it, you’re not going to write a poem or a story about the stretch reflex, and you’re not going to necessarily come up with a substrate independent description of the stretch reflex anymore. But the more complex the behavior becomes, the more one can begin to use the vocabulary that floats free of the substrate. Right. It’s back to what we were talking about before. Why is that? Why is it that you can get more and more free of the actual substrate and more algorithmic, the more cognitive and mind-like you become?  </p>



<p>Paul    00:26:07    Well, one answer to that is our cognitive limitations. Hmm.  </p>



<p>John    00:26:13    No, because we actually do quite a good job just like William James did. I’m just saying, you, you say that because you’d like to have a neural connectivity story about,  </p>



<p>David    00:26:24    I mean, this, this, this cognitive limitation thing is tricky. Right. Um, and certainly in relation to this question, you know, are we smart enough to be able to understand what mind is, et cetera. And it’s again, I just want to, I was thinking in terms of empirical precedent, right? It’s important to point out. The example I gave for functional states of matter was not resolved until the 19th century. Okay. That’s quite recent. So there’s a temporal nature of limit, right? Einstein’s theory of general relativity, which is from 1915, we didn’t understand the nature of space time until the early 20th century now. And couldn’t have done it without Riemann, which had happened in the 1850s. So there is a temporal aspect of this. So that’s very important. The question is, is there an absolute, right? Is there something special about brain mind, like phenomenon that are completely different from the history of scientific logical discovery? So they’ll always be outside of our reach and I can’t understand where that belief would come from.  </p>



<p>John    00:27:29    We did that out of our reach at all. I’m just saying that the coarse grained objects we use to describe mine phenomena will not be, will not feel the same way that people who like to look at eye movements and stretch reflex, and even the cerebellum where they feel like they can couch their understanding of their behavioral output in terms of their circuitry. And all I’m saying is if that’s what you want, you’re not going to get it  </p>



<p>David    00:27:54    Right. That’s absolutely right.  </p>



<p>Paul    00:27:56    Yeah. I think that might be right as well. But I wonder if there is a happy medium where going back to, um, and I don’t mean to perseverate on this, but you know, just from a very selfish standpoint, I still would like a mapping. It doesn’t have to, I don’t have to map it onto the circuit, but I want just a way of formulating a question, you know?  </p>



<p>David    00:28:17    Cool. Let me just, it’s interesting though. Do you feel that way about the concepts of temperature and heat?  </p>



<p>Paul    00:28:24    Well, I was going to say, you said, um, now that we understand space and time, and I don’t know that we understand space-time yet. Right,  </p>



<p>David    00:28:31    Right. We do. We know  </p>



<p>Paul    00:28:33    We actually have a better explanation.  </p>



<p>David    00:28:35    Yes. But let me just, that’s absolutely true. I think all of these theories are very approximate to them and they get better and better, but I just want to get to that point that you make about, I guess we’d have to call this something like satisfiability or something, which is what you want this molecular. And I, I’m just curious because I do too. I’m, I’m the person who’s interested in mappings. Um, but it is interesting that I want to just ask you, is this a general feeling you have, or is it special for my own brain? Because is it important to you that there is a statistical mechanical theory that explains the bulk average properties of molecules and their energy that allows you to use concepts like temperature and pressure. And why does it matter to you?  </p>



<p>Paul    00:29:23    So sorry. Does, does it matter that I use the coarse-grained explanation of what heat is to perform work? Is that the, sorry? Is that the question?  </p>



<p>David    00:29:31    I think it’s, again, just sort of mediating between you and John’s position on this, which is that we now know of course, that there is such a connection and it’s very important, um, that justifies in some sense, the higher level theory, but for most people doing work, they’re quite happy to deploy the high level theory and telling them there’s one errant molecule in a room. It doesn’t do it for them. It doesn’t make much difference. And I guess that’s what I’m asking is, is it, do we feel once that the, you know, that renormalization has been done right. That we can dispense with the micro?  </p>



<p>John    00:30:11    I mean, and they may give you an example of poor. I think, you know, I did a lot of reading on the sort of the philosophy and the history of the action potential. And it’s very interesting that in, um, what’s his name, he wrote that history of neurology in the 1950s, um, blanking shouldn’t have had that grassy red wine. But anyway, at one point he says that the action potential was a huge advance that would help us understand how the brain works,  </p>



<p>Paul    00:30:43    Your production. No,  </p>



<p>John    00:30:44    No, no, no, no. But I find those kind of statements fascinating because what they do is they take a very useful horizontal piece of work that locally describes transmission and then makes this huge vertical claim for it. Okay. And what I’m saying is, is I can’t decide whether you’re saying you’d like the mapping is just because the whole field wants to always have a vertical claim for horizontal work and you know, and the history of the action potential, you know, in 1952, when that paper was published, they didn’t know about ion channels. They just knew that there were sort of voltage sensitive changes in permeability of the membrane and wrote out an equation of the propagation of the action potential. Now, if you were to still explain to someone how an action potential works, you wouldn’t start describing the details of the ion channel sub units.  </p>



<p>Paul    00:31:44    It depends on what level you’re explaining.  </p>



<p>John    00:31:46    If you just want to explain the action potential propagation, I can assure you, you will not write a sentence where you include the ion channel composition.  </p>



<p>David    00:31:56    Here’s another one  </p>



<p>John    00:31:58    I’m trying to finish that. So in other words, it’s nice to know that the w the reason why you have permeability changes is due to the existence of ion channels. It’s nice to know that there’s something there doing it, but to actually explain how you get the action potential propagating, you don’t need to know that detail. So, in other words, when you ask your question, you have to ask it in two ways. Does that detail simply give you solace that there’s a foundation upon which this abstraction is built, or does it actually add substantially to the sense of understanding? And when it comes to the action potential, I’m going to say to you the answer, Paul, is no,  </p>



<p>David    00:32:49    Here’s an example. Okay. No, that’s good. It’s good. Here’s, here’s another example. I think it too, I want to be, I hate to be in the middle of such a new thing like this, but I think I have it all done by the crackers. Not cause I’m sort of somewhere between you and John on this, which really annoys me. I want to be more extreme than both of you, but it’s the natural selection is a good example. So when Darwin formulated the theory, he had this naive theory of genetics, he had his theory of pangenes, and it was based essentially on a fluid metaphor. It was continuous, but it’s called blending inheritance. And it didn’t in any way. He’s completely erroneous theory of how inheritance works by the way, completely erroneous, um, compromise the integrity of his higher level selection theory. Yeah. And of course, during the modern synthesis people like Wright and Haldane came along and said, you know, it doesn’t work, man.  </p>



<p>David    00:33:40    Blending inheritance will not work. Let’s produce this kind of average quantity in the world. And they then reconcile the theory with Mendel’s contributions, which are particulate and so on. And now two points to make at the level of organisms. It made absolutely no difference. It didn’t compromise the theory. Darwin’s theory was not changed by Wright and Haldane. What they did is reconciled genetics with the theory and the theory of population genetics, which tries to explain the distribution of genes using natural selection does have to have both that’s critical. Right? So if the object under analysis is the gene, of course, but at the phenotypic level sometimes called the phenotypic gambit. You can kind of get away with ignoring it, and game theory, evolutionary game theory, doesn’t have any genetics in it. So it’s worth bearing in mind. It’s very level dependent in terms of what you should and should not include.  </p>



<p>John    00:34:36    And, and, and I think that the mistake that is made all the time is confirmatory, reconciling facts do not figure in the explanation and those get collapsed, right? And, and, and the, the existence of ion channels is a nice confirmation and verification. It may help you poison someone, but it doesn’t change the qualitative nature of the way you think about the propagation of an action potential. You just need to know about varying voltages and capacitance. Do you see? And, and so, in other words, when you ask your question,  </p>



<p>David    00:35:12    Wait, what’s the question, I forgot the question. Now, we all have  </p>



<p>John    00:35:17    The real question is, is you want there to be some mapping between presumably structures and circuits and mind phenomena. And I’m just saying that I don’t always have an intuition, why that mapping between level N minus one and level N is going to qualitatively change the intuitive nature of the explanation you construct at level N.  </p>



<p>Paul    00:35:43    My bet is that there is a in plus one or two level between these two that is satisfactory. I mean, so this, we can go back to David’s question about understanding, uh, heat versus the molecular collection of molecules. And, and that actually, so, so I’m fine with that. I can, I can use heat and I don’t need to understand the molecules to use the heat. And, but I, but I also wonder, because I mean, I don’t understand heat at, you know, of core level, but I use it a lot. And so I have a sense that I understand it, and I don’t need to explain the molecules to use it cause it’s, I can always use it the same way and I can take, you know, there, there, you know, I can take the, um, uh, the explanation I can take that mapping and think, okay, I’m satisfied with that without being an expert.  </p>



<p>John    00:36:40    You said, do you think, did you think that if you, if I, if I asked you that the dog chased the cat, right. And I said, do you understand what I said? And you went, yes, I know what that means. And then I said, but do you understand the particular syntactic structure of English that tends to be subject verb object? And did you know that there’s this universal feature, these are syntactical rules of English. And I said, so you don’t really understand the dog, chase the cat as well as I do, because I’m a linguist who can talk about syntax and objects and subjects and verbs. It would be very odd thing for me to say, right? It’s not, I know extra facts about language and I can use, but to say that you would understand the dog chased the cat better. If you were a linguist would be a very odd thing. And that’s what you seem to be forced to adhere to.  </p>



<p>Paul    00:37:38    So I actually, I, we can have a running bet I believe, and I don’t know that we’ll get, I kind of doubt that we’ll get there in my lifetime, but I believe that there is not some sort of one-to-one correspondence where I can look at a circuit and the, and know the 1 million, 250,000 neurons firing in this particular pattern corresponds to the feeling of love or something. I don’t think that there’s going to be that mapping. That’s not what I’m looking for. And I think you’re misconstruing my desire as that as like mapping onto the physical substrate, what I’m betting on. And I believe that will, there will be described one day is an in between way.  </p>



<p>John    00:38:19    I gave you that I gave you, I told you about trajectories and state spaces, dynamical ordinances.  </p>



<p>Paul    00:38:23    It goes exactly back to that, but that’s a usage case. And I, but that hasn’t happened in mind yet. I mean, it’s, that’s happened looking at state trajectories  </p>



<p>John    00:38:35    Who are coming up with similar kind of dynamical systems you have of prefrontal cortex and beginning to talk in that way. Uh, so in other words, it wouldn’t, it’s so far you’re right. It’s been sort of convolutional neural networks for vision and recurrent neural networks for  </p>



<p>David    00:38:52    Motor cortex. But I have a feeling that, you know, there are people like Xiao-Jing Wang and others who are beginning to worry about prefrontal cortex. And so it may well be that you’ll have an object that is a mixture of psychological language and neurodynamics, so it would satisfy you. I want to add something else to this conversation now, which is functionalism and degeneracy. Cause I think when complex systems it’s right.  </p>



<p>Paul    00:39:19    Sorry. Wait, so just on, in complex systems,  </p>



<p>David    00:39:23    I have no, not correct. It’s right. To have this debate because I feel that even if you adhere to, and I’m just going to caricature, this is Paul versus John here. Right. I don’t think it’s fair to say because, but nevertheless, um, there’s another one which is completely orthogonal to this. So if you think about telescopes, right, there are radio telescopes and there are optical telescopes. You think about cars, there are electrical cars and there are cars that use the combustion engine. They are not at all the same, not at all. They use completely different principles. They achieve one  </p>



<p>John    00:39:56    Second,  </p>



<p>David    00:39:58    One second per second. Yup. They achieve the same objective. So, um, functionalism. So now if we’re talking about, uh, mind phenomena, I think there’s an argument that deep neural networks, which have absolutely nothing to do with brain. I mean, really nothing. And, uh, certainly not at the material level, not at the level of mechanism, the geometric topological correspondence is spurious. Uh, maybe in some cases, maybe it isn’t, um, we can all agree on that. We probably agree on that. Although there might be some cases, uh, probably going to give us much deeper insights into mind, the neuroscience. And we haven’t talked about that. So that’s not about mind emerging from brain matter. That’s mind emerging from something completely.  </p>



<p>John    00:40:46    But I mean, it’s been a bit, but it follows. I mean, the thing is David is if you believe in terms of psychological algorithmic, uh, descriptions of mind phenomena, it kind of follows that you’re you could get them in some other way. Now there are some who say no, the one way that, you know, again, because I’m actually used to be. And I think Paul knows is, you know, very much default functionalist, but I’m willing to believe now that you can have what David Barack and I equally neuro functional explanations, they’re functional explanatory objects with mural flavor, right. That you can have them both. Okay. So in other words, I think that the question is, is whether to have that neuro functional object, you have to have clear, for example, what if it turns out that even though the explanatory object is quite abstract and it’s a dynamical system plus words, but what if actually the tissue itself has properties that you need that there’ll be on neural populations and abstractions of connections, vessels clear local field potentials, effective transmission in others. It may be that the economical object that you end up coming up with can only be built out of biological tissue.  </p>



<p>Paul    00:42:11    David D does this accord with your view of the environment playing an interactive part in this, or is it a separate issue?  </p>



<p>David    00:42:18    A separate issue? I don’t agree with John and I can’t really think of because of universality. I can’t really think of any, anything like it. Um, and the idea is I understand it is that there’s something super special about molecules, which mean that functions, which are very divorced from them that operate at very aggregate core screen levels are actually dependent on them. So it’s sort of getting your, it is what you want. Paul, it’s having your cake and eating it theory, but I don’t quite know how that could work. I’m not aware of any such physical,  </p>



<p>John    00:42:51    I mean, to just understand it, you’re saying that you think that if I come up with some neuro functional object by definition, you should be able to swap out the constituent.  </p>



<p>David    00:43:02    Exactly. Yeah. In strong functionalist language. Yes. Yes. I think so.  </p>



<p>Paul    00:43:08    Just to mediate between you two then. So John, I, I tend toward this now as well, um, that there, there may be, you know, something that is, and this goes back to like the, the critical point of operation and what it takes to be in that area of operation. And it could take something as, I don’t want to say complex because we’re talking about complexity, but as massively intertwined and evolved over such a long period of time to sit at that right state, it might take the metabolism and the structure of  </p>



<p>David    00:43:40    It. Doesn’t go, doesn’t pull. So I’m someone who’s worked on critical points.  </p>



<p>Paul    00:43:45    Well, no, not, not for just critical points, but I mean, something like mind, right? So there’s lots of things that operate at critical points that aren’t mind.  </p>



<p>David    00:43:53    Well, that’s a critical insight, which is excuse the pun, which is that this is precisely the point. Um, people got very excited about things like heavy tails, right, and then they realized that, well, actually we have no central limit theorem for that too. And so that’s not a surprise critical points, got people excited, rightly and people like John Beggs and others, who’ve been arguing for the brain being at a critical point. But now we know of course that local area networks are at a critical point and social systems are at a critical point. In fact, everything that’s evolved is at a critical point,  </p>



<p>Paul    00:44:27    Small world and critical  </p>



<p>David    00:44:28    Point. Right? And so actually I don’t think that these features, they are fascinating by the way. Um, but I don’t think there are the tool that allows us to distinguish between, you know, mind, brain, light phenomena and other complex phenomenon. They’re just too ubiquitous. So I think criticality is a bit of a red herring. Moreover, uh, it’s now been shown that, uh, deep neural networks are nowhere near critical points, right. Which have many of the characteristics that people are interested in mind are interested in and there’s a, you can actually contrive statistical models where they are, but none of the learn trained ones are. So, um,  </p>



<p>John    00:45:10    What I was saying is actually, it’s not true so far, you know, there’s been no successful, really cognitive general AI achievement. And, you know, as I was saying, Jeff Hinton says, you know, that’s the last thing we’re going to get.  </p>



<p>Paul    00:45:25    And all I’m saying,  </p>



<p>John    00:45:27    Now the question is, what is the impediment? Is it architecture? Is it not knowing the right algorithms? Or is there something that you can currently make with biological tissue that obviously by definition, I’m not trying to say that you couldn’t abstract away an object that behaves like the objects that currently only neural tissue can make. But as long as once we work out what it’s made, what that object looks like, we’ll be able to make it in another way. I think you seem to be saying that by definition, if you can abstract to an algorithmic level, if you can come up with a coarse-grained description by definition, it should be made out of different. It should be duplicable with a different substrate.  </p>



<p>David    00:46:18    Yeah, I do believe, I do believe that I do not. I think your first part of your argument, I think I share, which is that we’re just not sufficiently clever engineers, right. To know how to do that. And, um,  </p>



<p>John    00:46:31    We’ve met with, we’re still missing something  </p>



<p>David    00:46:33    Missing a lot.  </p>



<p>Paul    00:46:34    Can I ask you guys that kind of a ridiculous question, but a break from the seriousness maybe, but, but this, I just had the other day, this daydream where I imagined a functionalist future, where we all accept functionalism, we build powerful AI and we accept because of their predictive ability. We accept that they have better purchase on our own interests and it seems to be, and we allow ourselves to be governed by their  </p>



<p>David    00:47:02    Organizations, but we already are well. Okay.  </p>



<p>Paul    00:47:04    Um, but let’s say it’s more concrete and more, I mean, that’s, that’s a whole different conversation, but, but okay. Let’s say everyone in. Anyway, the, the, the dystopian vision I had was where we accept a functionalist account, everything that they are doing, it makes it seem as this. Uh, now I just realized this is like the terrible zombie, uh, analogy, but, but it seems, you know, we interact with, you know, they’re their robots, whatever, you know, whatever, pick your favorite television show. And we, we allow for the fact that we assume they have consciousness and mind and on her level, whatever that means. Um, and so we could be in a place where we’re actually giving ourselves up to the organizational principles of these things that we functionally define as having minds. But in reality, there’s, you know, there’s, it’s vacant. There’s nothing there.  </p>



<p>John    00:47:56    I just think it’s, I think that’s completely impossible. That’s, that’s an example of giving an example. It just doesn’t make any sense on  </p>



<p>Paul    00:48:03    Its face. I realized that the zombie  </p>



<p>John    00:48:06    Mind, you know, just like when, uh, Lake and Gershman, and Tenenbaum wrote the BBS paper on what you, what would be needed to have general AI. And they basically come up with a set of behavioral criteria, you know, and it, it’s, it’s very similar to arguments and I wouldn’t dare go there with David here about what life is, you know, do you get, is it a different, a defined property or is it a cluster of properties or whatever, but I think that if you had your tick box, your checklist, as they had in their BBS article about what would be necessary, intuitive sociality, uh, intuitive physics, uh, modeling of the world, rather than classifying it one-shot learning, you know, extrapolation planning. I mean, whatever that list entails  </p>



<p>Paul    00:48:58    Four out of five or so  </p>



<p>John    00:49:00    They, um, and you had these robots that did that. They have minds as far as I’m concerned.  </p>



<p>David    00:49:08    Right. So we’re agreeing then we’re agreeing. I think there’s this very, there’s this interesting question. I think I’ll give another example from computing, which I’m not sure it’s a good one or not, but so it might be that one day there’s a certain class of computational problem that can be solved by quantum computer. So, right. So there are problems now that we might call NP complete. I don’t know if this is true or not, or at least extremely difficult to compute in any reasonable amount of time that a quantum computer could compute in our lifetime. And that would be a good example. I think of John’s position where this class of function, which you could describe hardware independently, simply couldn’t be realized in anything that had this property of entanglement and spooky action at a distance and massive parallelism that comes out of the quantum domain. So that is perhaps an example of where the physicality imposes constraints and what’s realizable in the logical space. But as far as I’m concerned, unless you believe as Penrose does that this applies to mind brain, which it may, I don’t know, everything we’re talking about is classical. So then I’m not aware of any fundamental physical limitation that’s analogous in, in mind brain. So I, that’s why I think I’m a functionalist. Does,  </p>



<p>Paul    00:50:27    Um, does timing matter to speed of information processing matter because you could have the exact same structure and do it very slowly? Is there a role, cause I know you’re very interested in time and that’s part of the complexity story as well. David, how do you, you know, like, um, John mentioned, I had Uri Hasson on and he talks about at different hierarchical levels in the brain operate on different timescales, seem to map on operating on two different timescales. And I thought, well, you know, there could be something to that, but the more recurrent something is it could operate on a slower dynamical timescale, and somehow that has a maps onto cognitive processing. But, but I wonder if, if you think of time, that way as well in the information processing and computation,  </p>



<p>David    00:51:15    I, I th I have a map I’ve had a much more modest approach to timescales and computation. So I’ve worked a lot on molecular computation, molecular information processing where time is exploited. So the half-life of a molecule is actually part of your box of tricks. Right? You can use that to solve problems. You can, you can actually make a frequency decoder by exploiting relative decay times. And so I’ve kind of entered that ingenuity of messing with timescales. Um, and clearly life is all about that, right? It’s just been tinkering from the beginning with these properties of molecules and all of these timescales. Um, but that’s not the same thing as saying it has to be done that way. And so, you know, when human beings play chess, you know, we have a brain with all these timescales in it, right. With the timescale of, you know, synaptic chemistry and the timescales of, and so on. Um, but you know, AlphaGo works at equilibrium, right? I mean, once it’s been trained, it’s, it’s basically there is no timescale, right? Um, time’s gone. So we do know that you can solve complicated problems with no temporal dynamics that are interesting. So I don’t know, but at the scales that I care about things often, it matters a lot.  </p>



<p>Paul    00:52:34    Doesn’t matter for mind over matter.  </p>



<p>John    00:52:36    I mean, again, um, times, uh, hierarchical systems as you go up the hierarchy, they operate more slowly on, on, because that’s, I mean, that’s the whole point of a hierarchical system is the timescales you operate on go up as you go up the hierarchy. So, you know, you, you could argue that when you are worrying about whether you’re going to college in a few years’ time versus your stretch reflex, those are just years versus seconds.  </p>



<p>Paul    00:53:09    Does that map onto our experience though? What do you mean mental experience? Do you know our sense of time and, um, the rate at which we are thinking, right. So there’s, so this is this mapping from brains to mind that I want,  </p>



<p>David    00:53:24    I think it does matter. It’s interesting. You say this — there’s a paper that Geoffrey West and I have been thinking about writing for years, which we never will. I don’t know, maybe, um, which is this, which is sort of interesting, right? So you can say, you know, Geoffrey has this very nice result, which is that smaller organisms have higher heart beat rates. Now that’s not his result, but, uh, if you rescale things, according to the allometric theory, the total number of heartbeats in a lifespan is more or less invariant. So there’s a bit like a photon being massless, this thing that pops out, which is quite surprising, right? So a very tiny organism just beats much more quickly than us and lives a shorter time. And we beat slowly and live a longer time. It turns out that all sums up to the same number of heartbeats. It’s just got some shocking result, but falls out of the theory. And the question we’ve been discussing is maybe we, there are similar invariances with respect to thought that a mayfly or something that seems from our perspective to live only a few days actually thinks it lives a hundred years, right? From the mayflies perspective, it feels the same.  </p>



<p>John    00:54:33    Let’s come to evidence, you know, from Parkinson disease. Right. You know, again, Oliver Sacks talks a lot about these patients in Awakenings. He tells a funny story where somebody, um, he sees somebody like this in the waiting area and he asks them what they’re doing. He said, I was about, I think I was going to scratch, or pick my nose. Right. And basically he just caught him in this extremely drawn out movement. But the point has been made that they don’t feel that they’re taking forever to pick their nose. Right. So maybe your subjective experience of time does relate in some way to the speed of your physiology.  </p>



<p>David    00:55:22    Right? So that exactly, that’s the question. So in the case of heartbeats, it’s a very simple calculation by the way, but we would do this properly for thought, the way we have to do it is we have to calculate, you know, distances between neurons, how quickly an impulse is propagated, et cetera, to see whether or not effectively as John just pointed out the sensation, the subjective sensation of time was an invariant that falls out of allometry. Would be kind of cool and useless,  </p>



<p>John    00:55:52    Um, you know, cooling, cooling, nuclei, tooling, the brain. Maybe it’s looking at the speed of computations with, with cooling,  </p>



<p>Paul    00:56:02    But you might not get, I say, you guys are talking about synaptic transmission rates and it might be more of a recurrence architectural feature, you know, circuit a circuit level.  </p>



<p>John    00:56:11    Right. But I think that what’s great about this conversation. If I may say, is it your consistent requirement for something? I think, I don’t know whether it’s the wrong question. In other words, that mind and brain will always have their separate vocabularies and their separate conceptual frameworks that do. And they, and we simply have to feel reassured, like David’s saying, talk about temperature, talk about volume, talk about pressure. It’s better to talk about weather prediction to those terms and just feel reassured that it’s consistent with statistical mechanics. And I wonder whether the only way that we’re going to get some neural information into our functionalist explanations is that they’ll look a little bit like a dynamical system is my guess. I mean, I’m beginning to be willing to believe that we might be, we might be able to think we might have a Feynman diagram way of thinking about things about mind, which are very heavily derived from neural data.  </p>



<p>John    00:57:20    And David Barack, as I said, I’m working with has convinced me that maybe you, we would at least be happy with neuro functional objects, not functional ones. So in other words, you don’t have to be a pure function. I mean, functionalism has two meanings. One is you think just in terms of processes rather than processors, the strong version of functioning as David’s one, which is that there should be many ways to implement it, that isn’t wedded to one physical implementational instantiation, but I don’t think you’re necessarily wanting that. I think he’d be happy if you just had something in the explanation you gave to people that had something that was neurally derived in the explanation, right there, a few psychological words, and I’m actually, I’m being very serious. Maybe we’ve reached a point where we’ll have not just psychological functional words, we’ll have neurone derived objects in the sentence, just like we have the motor neuron in the stretch reflex sentence. I I’m really not sure whether that would count as what we call in the paper at first level explainer.  </p>



<p>David    00:58:28    Hmm. It’s interesting. I obviously hear it as a five. There were phases where people became very enamored of dynamical systems. Um, and I think we’re over that a little bit. And I think that the it’s interesting. I, I’m not sure this is exactly the same thing, but I’ll give an example. It’s an argument I had at a meeting at Harvard with George Whitesides, and we were talking about the merits of information theory versus dynamics. And he works in, in, in nano biology and an extraordinary engineer and, and, and cell biologist. And he hated information theory. And I think I had a similar argument at the Salk Institute with, I don’t know if it was Sutton about this. It might’ve been people like dynamical systems, cause it feels closer to the matter, you know, it’s got that quality about it. And George felt that why do you need information theory?  </p>



<p>David    00:59:24    It’s just a secondary imposition. It’s observer dependent. Just describe it all in terms of the dynamics, these functional concepts that you are imposing are useless, the system isn’t doing anything, it’s just obeying Newton’s laws. And so dynamical systems is the right way to describe it. So it’s a kind of a weird reductionism. That’s not microscopic reductionism. It’s kind of John is describing, but the point about dynamical systems of the kind that we study is you can’t always write a Hamiltonian. You can’t always write an energy function down. So you don’t always have an action principle. So you can’t always say, this is what the system is minimizing. That’s the point to which it is tending. And what Shannon gave us was a framework where we could actually write down variational principles on top of dynamical systems. So you can say, as Bialek will do, he’ll say that a dynamical system is maximizing mutual information, or Tishby will say that the dynamics is implementing the information bottleneck or what have you. And so you need this language to give you the variational architecture, but as the optimization language, you don’t have internet,  </p>



<p>John    01:00:34    But, but why do you, in other words, again, when we use narrative or words like William James did, no formalism attached to it, but you’re still doing understanding. Well, I mean, you look at Eve Marder’s incredible work on the stomatogastric ganglion, right? Where she shows unbelievable redundancy in what the constituent neurons do, but there’s an invariance at the level of the pattern and the pattern and, you know, Eric Smith, your very own Eric Smith has made a beautiful case for ecosystems and in physics that you should treat the pattern as the entity of explanation rather than component processes. So when you look at Eve Marder’s work, it’s the pattern generated by a lot of swapping out that you can do at the level of it. So the invariance isn’t at the level of the components? No, I don’t think so. In other words, why can’t we just, all I was saying is if there was a neural pattern language.  </p>



<p>David    01:01:35    No, I know. I know what you’re saying.  </p>



<p>John    01:01:37    Isn’t that? Okay.  </p>



<p>David    01:01:39    Yes. No, there’s nothing. That’s great. I’m not, I just want to make a point here that, um, there are on the table here, three positions, at least one is let’s call it unfairly. The sort of microscopic reductionist who says it has to be as low as you can go, which is what John thinks I am. But that person in the end just has the total physics and then wants to do quantum mechanics. Okay. And they should, but they can’t. So they do neuroscience or whatever they do. Okay. Okay. So then you have the aggregated middle ground, which is a dynamical system, which says, you know, what we can do is we, like Shadlen and others, which is very interesting. So we can project onto this manifold, which captures the information it’s dynamically sufficient. In other words, the observable, my eye goes left or right. I get it just from tracking this, this, this.  </p>



<p>John    01:02:33    Yeah. But, but just, just be very, be careful though, is there’s a difference. I mean, I, I don’t know what Mike has done most recently, but before what he did was quite traditional that he would record from single units, see what they coded for, and then derive a psychophysical model of diffusion to bound. In this case with two parameters that were confirmed by the neural data, there was no theory of the config. Now, now, now they’re now had it’s right. So now I would say that it might comes up with these sort of you’re right. I, I, now that I think about it, you know, a dynamical system, then I think it’s closer to a neural pattern language. I think it begins to get to being a first level explaining.  </p>



<p>David    01:03:17    Yeah, exactly. So, so that’s, I just want to introduce this third one, so you’re right. So, um, that’s his point, this predictive low dimensional manifold that you move around on. Okay. And it’s useful. It’s great. I love it. But then there, the problem is it doesn’t tell you that sort of stuff where you should move it. Doesn’t tell you what the system designed.  </p>



<p>John    01:03:38    Can’t you just be teleological  </p>



<p>David    01:03:40    Where we wait, but what’s so beautiful about the Hamiltonian, right? What’s so beautiful about using information theory here is it tells you that something is being maximized under constraints and that’s a different language again. And so I, I guess, to be a pluralist here, I think there are multiple different pattern languages, right there. There’s the lowest level Lego building blocks. There’s as you say, John, this will dynamical system, no chiefs, but there’s a higher level yet, which tells you what the system is moving towards. And action.  </p>



<p>John    01:04:13    But I would say that, but I would say once you get to that, you don’t need to, you may not need to talk about neurons at all. You can give, you might not,  </p>



<p>David    01:04:21    You can just do,  </p>



<p>Paul    01:04:23    This is exactly the mapping that I’m seeking, right? These sorts of levels. And John, you’re enamored with dynamical systems.  </p>



<p>John    01:04:28    No, no, but I’m not, I’m not, it’s not as much enamored. I’m just saying that as a functionalist who was much more interested in just looking at really inspired cost functions and psychological like errors and rewards and motivation, I was very much in that world and you can build cost functions out of those behavioral derived measures because of my work with David and thinking about this, I’ve been willing to see, especially after mark told me that he began to chew it with these trajectories, it felt very fine. Manessa, you know, I think we all should be willing to change our minds. I thought to myself, Hmm. It does seem as at least beginning to think with a neurally derived object, which is different from the behavioral, the derived objects that I work with. So I began to think that maybe we’re going to enter an era where we have two types of explanatory object on the same plane. A behaviorally derived one and a neurally derived one where they’re actually on a level playing field. You see that that’s not something I was really entertaining as much as I’ve been willing to work in with David Brock and talking to the people who are doing this kind of work that maybe you and I think maybe at the moment, that’s going to be the closest, your wish is that you have a hybrid functional object that is made out of behavioral variables and neuro ones, but dynamical ones.  </p>



<p>Paul    01:05:53    Hmm. So it doesn’t have to be dynamics, but this is exactly the sort of thing that I’m talking about. That to my level of satisfaction would be some sort of bridging.  </p>



<p>John    01:06:03    Um, but it’s not bridging because it has that. They are, it’s a flat evidential landscape. In other words, they’re both being used to explain. There had been derived, you know, deriving from behavior and deriving from neurons. You could say one came up vertically. The other one is horizontal, but the space they occupy is not vertical with respect to each other. I think that is that okay?  </p>



<p>Paul    01:06:30    Yeah. I’m okay with that. But it’s interesting that I, I just, I, I maybe I wasn’t, I have not been explaining myself well in this partly because it’s an unknown territory. And so it’s impossible to explain what you don’t know, uh, how it’s going to look. Right. So I don’t think it’s going to be a dynamical systems, state space trajectory. That’s gonna make me feel like I like that’s going to be the bridge. You know, I’m gonna say bridging again, but, but some sort of mapping, some different level of understanding. And David was just saying that there’s going to be multiple levels. How many levels are there going to be? How many do we need to do  </p>



<p>David    01:07:05    Many infinite? I actually don’t. I have to say, I don’t think because of this feature. So it’s interesting this question, right? Because we do know that there are an infinite number of models if you’re allowed to have an infinite number of parameters. So, right. So you can always fit a phenomenon that is fit with n parameters with n plus one and up. So, um, I think it has to do with what satisfies our desire for understanding. And I mean, this gets to pedagogy. Fairness is kind of a weird digression, but I’ve always thought that great teachers can explain the same idea in multiple different ways. I’ve just been reading a book called 99 Variations on a Proof. And it’s, it’s an allusion to the French novelist Raymond Queneau’s book called Exercises in Style. And he shows that you can solve this cubic equation, prove this cubic equation, 99 different ways. Right. And who knows if that’s the upper bound, but they all illuminate what a cubic is and what a solution means and different human beings on this planet will like those proofs to different degrees. I love that. And I feel that there’s no reason to assume that there there’s just one or two or three or four, there’ll be multiple  </p>



<p>John    01:08:16    Different level. Although I think it’s, I mean, that’s, I like ultra pragmatism. I would say that, that there will be a few favored levels for the best effective theories that you can do pragmatic work with. You can transmit understanding you can lead to new experiments, test new hypothesis. I mean, the best effective theory is the one that leads to the most fruitful number of conjecture hypothesis. Right? So in other words, it seems to me that it would be very odd to not all converge on some cluster of effective theory levels that would all work.  </p>



<p>David    01:08:56    I don’t think that’s true. I mean, I gave the example earlier of Newton. You know, the way you did this is you just take conic sections, you get circles, you get ellipses kind of orbits, and then you can do it algebraically and you do it with calculus. And it’s just turns out to be much more efficient than doing a geometrically. But I, I, I’m not sure. I think John, I think by virtue of the preferential attachment nature of culture, that right, that there is a kind of a winner takes all dynamic. There will be a few preferred formalisms, but I’m not sure there’ll be preferred because they’re the best in some objective.  </p>



<p>Paul    01:09:32    So in the case of just to bring it back to heat again, where we all feel comfortable with this idea of, you know, what heat is relative to the collection of molecules, is that it, I mean, we all agree. That’s fine. We’re all comfortable with it. Are there more levels that need to be had that could be had, we’ll be a better explanation.  </p>



<p>David    01:09:53    There might be, there might be more parsimonious means of describing it. I mean, it’s true. Perhaps there’s something about the simplicity of the phenomenon that doesn’t permit.  </p>



<p>Paul    01:10:04    That’s why that analogy might not be right between brain and mind  </p>



<p>David    01:10:10    You take something, but you take the example that I gave of a cubic right. Pretty simple thing. Right. And you can just multiply proofs. Um, so I don’t know, I don’t know what the best analogy is. Okay.  </p>



<p>John    01:10:21    I think also, I mean, I it’s been, I read it a while back, but you know, Rosa Cao and Dan Yamins wrote, um, about, you know, the ventral pathway. And does it count as an understanding rather than what we’ve been saying, which is just, you know, uh, an opaque fit, right. And I actually think they make a good case, but at one point Rosa Cao and Dan Yamins talk about the contravariance principle, that the more, the more complex phenomenon becomes. And I’m, I’m sure I’m mangling this, the number of ways to actually get it done goes down, right? That, that, that, that simple things can be done in a lot of ways, complicated things, complex things reduce the number of degrees of freedom you have available to get it built. And so one of the reasons they argue that there’s genuine insight given from their work on the ventral stream and they make, you know, is that the best predictor of the neuro responses in the ventral stream is now given from a deep neural network that was trained on images.  </p>



<p>John    01:11:32    In other words, it is kind of fascinating that if you want to predict, when you go into an area of the ventral stream, what the neurons will look like, you’re going to get a better prediction from your deep neural network, right then from what you think. So, so they argue that first of all, that isn’t a level of abstraction because there are no neurons with biophysics in that system, but they then say that the reason why that may be happening is that things like object recognition in a layered system, there aren’t that many ways to actually do it.  </p>



<p>David    01:12:04    I mean, I w I don’t quite understand what they’re talking about, because  </p>



<p>Paul    01:12:07    Let me jump in real quick here, because I’m out of time just about, so let’s end on this, but so, so David, let me give you the last word there. And, uh, I’ll just throw in. What if it’s the case that object recognition is just easy. And so there are many different ways to do it. And then David, this is a complexity question. So I’ll, I’ll let you address the, the many ways versus few ways to do complex things.  </p>



<p>David    01:12:35    Well, I don’t, I mean, I don’t have a definitive answer. I simply just say that if you have some Boolean function, it can be realized in an infinite number of ways. I don’t understand this idea, that complex things have few ways of being  </p>



<p>John    01:12:47    Well. I mean, they convert an evolution that, you know, wings, right. That they ended up having a similar shape. It’s not like you can have,  </p>



<p>David    01:12:55    But they’re realized  </p>



<p>John    01:12:56    Totally different, but their shape is  </p>



<p>David    01:13:00    Well, that’s the function. I don’t know. I haven’t read the paper, but I bet it’s wrong.  </p>



<p>Paul    01:13:05    That’s a great, great question, guys. So  </p>



<p>John    01:13:09    Thank you very much for putting up with us. Thank you.  </p>



<p>Paul    01:13:11    That’s fine. Oh, no, thanks. I appreciate you guys piling on me there for a long time. That was great. Yeah.  </p>



<p>David    01:13:16    That is new Mexican photons that you’ve seen me moving like a sundial  </p>

</div></div>]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/078-Krakauers.mp3" length="71930672"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
















In this second part of our conversation David, John, and I continue to discuss the role of complexity science in the study of intelligence, brains, and minds. We also get into functionalism and multiple realizability, dynamical systems explanations, the role of time in thinking, and more. Be sure to listen to the first part, which lays the foundation for what we discuss in this episode.



Notes:



David’s page at the Santa Fe Institute.John’s BLAM lab website.Follow SFI on twitter: @sfiscience.BLAM on Twitter: @blamlab Related Krakauer stuff:At the limits of thought. An Aeon article by DavidComplex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks. A video conversation with both John and David.Complexity Podcast.Books mentioned:Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute, ed. David Krakauer.Understanding Scientific Understanding by Henk de Regt.The Idea of the Brain by Matthew Cobb.New Dark Age: Technology and the End of the Future by James Bridle.The River of Consciousness by Oliver Sacks.



]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-krakauer-2-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:14:37</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 077 David and John Krakauer: Part 1]]>
                </title>
                <pubDate>Tue, 14 Jul 2020 15:30:25 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-077-david-and-john-krakauer-part-1</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-077-david-and-john-krakauer-part-1</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/07/art-krakauer-1-01.jpg" alt="" class="wp-image-1068" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2019/01/JohnKrakauerMaster_hires-1-370x370-bw.jpg" alt="" class="wp-image-635" width="200" height="200" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/davidkrakauer.jpg" alt="" class="wp-image-1069" width="244" height="167" /></div>



<p>David, John, and I discuss the role of complexity science in the study of intelligence. In this first part, we talk about complexity itself, its role in neuroscience, emergence and levels of explanation, understanding, epistemology and ontology, and really quite a bit more.</p>



<p>Notes:</p>



<ul><li><a href="https://www.santafe.edu/people/profile/david-krakauer">David’s page</a> at the Santa Fe Institute.</li><li>John’s <a href="http://blam-lab.org/">BLAM lab</a> website.</li><li>Follow SFI on twitter: <a href="https://twitter.com/sfiscience">@sfiscience</a>.</li><li>BLAM on Twitter: <a href="https://twitter.com/blamlab?lang=en">@blamlab</a> </li><li>Related Krakauer stuff:<ul><li><a href="https://aeon.co/essays/will-brains-or-algorithms-rule-the-kingdom-of-science">At the limits of thought</a>. An Aeon article by David</li><li><a href="https://youtu.be/HnA91mymghA">Complex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks</a>. A video conversation with both John and David.</li><li><a href="https://www.santafe.edu/culture/podcast">Complexity Podcast</a>.</li></ul></li><li>Books mentioned:<ul><li><a href="https://www.amazon.com/gp/product/B07ZDJPLD4/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B07ZDJPLD4&amp;linkId=16519a2009243928d09873a9ff1d7222" target="_blank" rel="noreferrer noopener">Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute</a>, ed. 
David Krakauer.</li><li><a href="https://www.amazon.com/gp/product/0197510264/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0197510264&amp;linkId=39d73128189707346c55b6f16d794aad">Understanding Scientific Understanding</a> by Henk de Regt.</li><li><a href="https://www.amazon.com/gp/product/1541646851/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541646851&amp;linkId=d031c5c1f00485f7d203574889d951aa">The Idea of the Brain</a> by Matthew Cobb.</li><li><a href="https://www.amazon.com/gp/product/1786635488/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1786635488&amp;linkId=1f88679448334aa2e28cd26d7138bfac" target="_blank" rel="noreferrer noopener">New Dark Age: Technology and the End of the Future</a> by James Bridle.</li><li><a href="https://www.amazon.com/gp/product/0804171009/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0804171009&amp;linkId=9751c6a6e6311ca1b62b3ac2c60ac9e3" target="_blank" rel="noreferrer noopener">The River of Consciousness</a> by Oliver Sacks.</li></ul></li></ul>



<div class="wp-block-group"><div class="wp-block-group__inner-container"></div></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="330" height="500" src="https://braininspired.co/wp-content/uploads/2020/07/31YCRAuHKIL.jpg" alt="" class="wp-image-1081" /></li><li class="blocks-gallery-item"><img width="333" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/de-regt.jpg" alt="" class="wp-image-1070...&lt;/div&gt;&lt;/body&gt;&lt;/html&gt;" /></li></ul>]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[
















David, John, and I discuss the role of complexity science in the study of intelligence. In this first part, we talk about complexity itself, its role in neuroscience, emergence and levels of explanation, understanding, epistemology and ontology, and really quite a bit more.



Notes:



David’s page at the Santa Fe Institute.John’s BLAM lab website.Follow SFI on twitter: @sfiscience.BLAM on Twitter: @blamlab Related Krakauer stuff:At the limits of thought. An Aeon article by DavidComplex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks. A video conversation with both John and David.Complexity Podcast.Books mentioned:Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute, ed. David Krakauer.Understanding Scientific Understanding by Henk de Regt.The Idea of the Brain by Matthew Cobb.New Dark Age: Technology and the End of the Future by James Bridle.The River of Consciousness by Oliver Sacks.







]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 077 David and John Krakauer: Part 1]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/07/art-krakauer-1-01.jpg" alt="" class="wp-image-1068" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2019/01/JohnKrakauerMaster_hires-1-370x370-bw.jpg" alt="" class="wp-image-635" width="200" height="200" /></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/davidkrakauer.jpg" alt="" class="wp-image-1069" width="244" height="167" /></div>



<p>David, John, and I discuss the role of complexity science in the study of intelligence. In this first part, we talk about complexity itself, its role in neuroscience, emergence and levels of explanation, understanding, epistemology and ontology, and really quite a bit more.</p>



<p>Notes:</p>



<ul><li><a href="https://www.santafe.edu/people/profile/david-krakauer">David’s page</a> at the Santa Fe Institute.</li><li>John’s <a href="http://blam-lab.org/">BLAM lab</a> website.</li><li>Follow SFI on twitter: <a href="https://twitter.com/sfiscience">@sfiscience</a>.</li><li>BLAM on Twitter: <a href="https://twitter.com/blamlab?lang=en">@blamlab</a> </li><li>Related Krakauer stuff:<ul><li><a href="https://aeon.co/essays/will-brains-or-algorithms-rule-the-kingdom-of-science">At the limits of thought</a>. An Aeon article by David</li><li><a href="https://youtu.be/HnA91mymghA">Complex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks</a>. A video conversation with both John and David.</li><li><a href="https://www.santafe.edu/culture/podcast">Complexity Podcast</a>.</li></ul></li><li>Books mentioned:<ul><li><a href="https://www.amazon.com/gp/product/B07ZDJPLD4/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=B07ZDJPLD4&amp;linkId=16519a2009243928d09873a9ff1d7222" target="_blank" rel="noreferrer noopener">Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute</a>, ed. 
David Krakauer.</li><li><a href="https://www.amazon.com/gp/product/0197510264/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0197510264&amp;linkId=39d73128189707346c55b6f16d794aad">Understanding Scientific Understanding</a> by Henk de Regt.</li><li><a href="https://www.amazon.com/gp/product/1541646851/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1541646851&amp;linkId=d031c5c1f00485f7d203574889d951aa">The Idea of the Brain</a> by Matthew Cobb.</li><li><a href="https://www.amazon.com/gp/product/1786635488/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1786635488&amp;linkId=1f88679448334aa2e28cd26d7138bfac" target="_blank" rel="noreferrer noopener">New Dark Age: Technology and the End of the Future</a> by James Bridle.</li><li><a href="https://www.amazon.com/gp/product/0804171009/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0804171009&amp;linkId=9751c6a6e6311ca1b62b3ac2c60ac9e3" target="_blank" rel="noreferrer noopener">The River of Consciousness</a> by Oliver Sacks.</li></ul></li></ul>



<div class="wp-block-group"><div class="wp-block-group__inner-container"></div></div>



<ul class="blocks-gallery-grid"><li class="blocks-gallery-item"><img width="330" height="500" src="https://braininspired.co/wp-content/uploads/2020/07/31YCRAuHKIL.jpg" alt="" class="wp-image-1081" /></li><li class="blocks-gallery-item"><img width="333" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/de-regt.jpg" alt="" class="wp-image-1070" /></li><li class="blocks-gallery-item"><img width="224" height="346" src="https://braininspired.co/wp-content/uploads/2020/07/cobb.jpg" alt="" class="wp-image-1071" /></li><li class="blocks-gallery-item"><img width="326" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/newdarkage.jpg" alt="" class="wp-image-1076" /></li><li class="blocks-gallery-item"><img width="324" height="499" src="https://braininspired.co/wp-content/uploads/2020/07/riversacks.jpg" alt="" class="wp-image-1077" /></li></ul>


<div class="lightweight-accordion"><span>Transcript</span><div class="lightweight-accordion-body">

<p>David    00:00:01    I care that we are honest about what we don’t understand and, and the limits of our methods and the limits of our frameworks. That to me, is what’s important.  </p>



<p>John    00:00:11    I’ve come to the conclusion. There are two things that you cannot avoid. If you are thinking about science, one is you have to think philosophically and the other one is you’re going to have to deal with complexity and its broadest conception. I don’t think you can escape either of those ways of thinking that go along with the more traditional notions of what a scientist does.  </p>



<p>David    00:00:37    I just want to say, I don’t think mind emerges from brain mind. Emergently is engineered by an environment. And that’s the thing that I’ve always found missing in the mind brain is the third part, which is, I think it’s pointless to talk about mind without talking about environment.  </p>



<p>John    00:00:57    There seems to be a trade-off between collecting data versus actually slowing down and having a thing.  </p>



<p>Speaker 3    00:01:10    This is brain inspired.  </p>



<p>Paul    00:01:24    Hey everyone, I’m Paul Middlebrooks and those were the voices of two brothers David and John Krakauer. I’ve had John on before. Uh, he runs his brain learning animation and movement lab, his blam lab at Johns Hopkins where their work circles around motor learning and memory, uh, learning motor skills and recovering motor skills after brain injuries. And David is the president of the Santa Fe Institute, SFI, where they use complexity science to study all sorts of things. David himself has a wide range of interests that maybe all spring from a central interest in the evolutionary history of intelligence and information processing at various biological levels from molecules to cells, to collective groups, to societies and beyond. And you can hear David often on SFI’s podcast, which is called, uh, simply Complexity. So this is one of those episodes where I let it run a bit wild and take its own course.  </p>



<p>Paul    00:02:25    Although there are a few main themes as we talk. Those main themes are complexity science itself and its role in neuroscience. How to think about emergence and what are the right levels of explanation, especially in hierarchical systems, like the kind we’re interested in regarding intelligence and cognitive phenomena, um, cognitive phenomena from, you know, the simpler like reflexes or, uh, making eye movement decisions, uh, up to the highest order like awareness consciousness, projecting your astral body, things of things of that nature. Um, we talk about understanding, and this is really just scratching the surface. It’s fruitless for me to list everything that we talked about, but maybe the best way to characterize the conversation is that it’s about how to think about whatever you’re working on. We spoke for a long time. So I split the episode into this first half is pretty broad and lays the groundwork and the second half, which I’ll publish in a few days really heats up.  </p>



<p>Paul    00:03:29    And we talk more specifically about brains and minds and what role complexity, science thinking, uh, conserve moving forward. I linked to David and John’s information in the show notes as well as a few of the resources mentioned throughout at brain inspired.co/podcast/ 77. If you value this podcast and you want to support it and hear the full versions of all the episodes and occasional separate bonus episodes, you can do that for next to nothing through Patrion, go to brain inspired.co and click the red Patrion button there. This was especially fun for me. Uh, I have an older brother and John is David’s older brother, but my brother and I usually co cogitate, uh, about things like the sounds and smells of our respective children. You know, the latest, embarrassing mistake either of us has made, and there’s never a shortage there and things of that nature. So it was fun to witness David and John go back and forth on these topics of intellectual exploration. And in many ways, as I expected, I sort of just pressed go on the Krakauer machine, uh, and just tried to keep up anyway. I hope that you enjoy it and it makes your world better as it did mine.  </p>



<p>Paul    00:04:44    David, I’ve heard you, uh, talk about your conception of intelligence and stupidity. Um, and it, I don’t know if you want to briefly talk about what those are, but you also talk a lot about, about entropy, but when I hear you talk about intelligence too, and stupidity it maps directly on to what I’ve heard you say about increasing and decreasing entropy, is there a direct mapping between those two?  </p>



<p>David    00:05:12    There’s a relationship in the sense that, um, methods of information theory that deal with concepts like entropy are useful in understanding intelligence and brains and so forth. So I don’t use it in the sense that say my colleague, Sean Carroll would write in terms of the, oh, they might be related, but in terms of the, um, entropy in the universe, I use it quite operationally in terms of the reduction of uncertainty between a signaler and a receiver. And so that’s the common sense, I think,  </p>



<p>Paul    00:05:47    But from, from what I understand an intelligent process for you does decrease entropy, whereas a stupid process inherently by it’s by your of that seems to increase entropy. Do I have that mapping?  </p>



<p>David    00:06:00    Yeah. I mean to these tenders, uh, when someone is teaching you something, um, you come to an efficient means of arriving at the right answer, right, as opposed to a whole bunch of spurious answers. And at that scale, you could say the entropy is reduced. Um, so very operationally, uh, but I don’t, I wouldn’t overstate. There’s much more to intelligence and stupidity than, than just a very simple information theoretic measure like entropy. It’s just a part of it. It’s just a bit of arithmetic that you use, uh, to get at the questions.  </p>



<p>Paul    00:06:33    Very good. And by the way, welcome, uh, David and John to the podcast. John, welcome back. And David, thank you. The brothers Krakauer being here together. Uh, this is a, uh, w what a day, what a, what an episode already.  </p>



<p>David    00:06:45    It’s a horror, it’s a horror movie. It’s a horror cost.  </p>



<p>Paul    00:06:49    I’ll have the background music going the soundtrack. So guys let’s talk about complexity first. Um, I, I just actually had Olaf Sporns on the show and he, you know, is a network neuroscientist. Um, and he has done a lot for introducing well complexity into neuroscience, via networks. Um, and I, I think that neuroscientists can, uh, be hesitant about embracing complexity in their research, um, because you’re already dealing with, you know, a, uh, a difficult and challenging area of study, and then you think, oh yeah, it’s complexity. And then you sort of approach it and you think, oh shit. And then because you end up studying complexity itself, it seems like no matter what. And then, and then you’re out of the realm of actually studying brains, or you could end up that way. Uh, there are a few different ways to think about complexity and I’ve David I’ve heard, you mentioned, you know, kind of slice it in multiple different ways.  </p>



<p>Paul    00:07:46    One is that complexity is networked adaptive systems. And so there are two words there. The network is one of them and I had Olof on and we talk all about that and adaptive is another one. So it strikes me that all companies, all complex systems are both networks and adaptive by this definition here. And it strikes me there are networks, uh, that are not complex because they’re not adaptive. So for instance, a spiderweb or something, I don’t know if you’d call that adaptive, but you could consider every cross a node for instance, and that’s not adaptive. Are there adaptive systems that are not complex though, are their adaptive systems that aren’t network? You wouldn’t consider a network. And by the way, both of you just jumped in because these are all questions for both of you.  </p>



<p>David    00:08:35    Yeah. So, um, possibly, I mean, you could, someone might claim a thermostat was adaptive, but I don’t think we treat it as a complex system. I mean, there’s a way of making this really simple, right? And that’s just that many of us have felt dissatisfaction right? With the methods and the frameworks that people have been using to deal with a certain class of phenomena in the universe and that class of phenomena we call living. Right? I mean, we don’t get into a big discussion about what life is. And complexity science basically says that across many of these different living domains, and by the way, we would include the economy in that. That’s what makes us maybe a little different, there are common principles and there are useful methods that can be applied to all of them. So I think there’s just the straightforward way of talking about it.  </p>



<p>David    00:09:30    That’s not mystical. And it’s very unfair. I think when people who are disciplinary say, what’s complexity, because if you ask a physicist by the way, what’s physics, and you should try this because I have, I always do this when, you know, cause you don’t want to do it because they can’t. Yeah. And the best answer you get is we do back of the envelope calculations, which tells you absolutely nothing. Uh, if you ask a biologist, what is biology? They’ll say, well, that’s, it’s the study of living systems. And then you ask them what life is and then it all goes horribly wrong. So all fields, right, have this problem of having an essential quality, uh, and then defined in terms of broadly of the domains that they studying and the methods that they use. So  </p>



<p>Paul    00:10:13    Do people really, do you think that people have the notion that there’s something mystical about complexity? Is that the general, uh, response you get is  </p>



<p>David    00:10:22    Well it’s worth getting at the history? I think part of the problem is that, um, very broadly speaking, there are two schools. One of them is interested in determinism and deduction and pattern, and it really goes back to Alan Turing in the 1930s. Okay. And that has its chronology through Ulam and all the way up to John Conway who just passed away. And most recently, of course, people like Stephen Wolfram, and they’re interested in patterns and simple rules that produce them. And that’s one school. It doesn’t have much to do with the adaptive world, right? It’s not really natural science. It’s a kind of a logic that uses computation in mathematics. There’s another school, which is very much what the Santa Fe Institute is. And it’s interested in randomness, empiricism, more interested in induction and universality. And for that reason, we were interested in economies and history and brains and genomes, right from the beginning.  </p>



<p>David    00:11:30    And those three elements are critical in particular, how laws, natural laws or contingent laws evolved regularities, exploited, randomness to create function. Uh, that’s why information theory is important. And that’s what we do. We’re looking for these constraints, if you like, or physical principles through which noise is filtered to produce things which have regularities that might be universal, meaning that we’ll find patterns in the brain that resemble patterns in culture. Um, and that’s our style of complexity science. And in the end, it’s only as good as the work that’s done done. Right. In other words, it’s a very high level specification and we’ll get into it of course today. But I, you mentioned Olaf is very good example. Olaf is in more of the applied side, of course, um, uses network theory to study the brain is extremely useful and insightful and no one calls that mystical and maybe weird if they did. So it comes down in the end to where the rubber hits the road. And that I think would demonstrate that there’s an, a mysticism involved.  </p>



<p>Paul    00:12:38    I mean, that was an interesting thing because I hadn’t thought of the mysticism mysticism aspect of people sort of being wary of it. It sounds like people are wary of it if, if they come in thinking it’s mystical and maybe it’s because it is, it’s still new, it’s complexity. Science is even, is newer than neuroscience, which I thought was interesting. The word at least.  </p>



<p>David    00:12:59    Yeah. Well, I mean, again, in that second tradition of randomness in induction and universality, you have people like Claude Shannon in the forties and Wiener and others, and Murray of course, and Phil Anderson, and in our community Manfred Eigen, who were not considered mystical because many of them won Nobel prizes. So I’m not quite sure where that comes from, but I think what makes it in the eyes of many people suspect is first of all, most people hate ideas, philosophy and theory. Uh, they’re just, if it’s not factual in the most, you know, mind-numbingly obvious way they’re suspicious of it. And the other is that they think it’s a theory of everything. And there have been people in my community who have made that mistake actually. Uh, and I think to be honest, it’s more of a crime in the first school, the sort of deterministic deductive school that looks for metaphorical correspondences in patterns. You know, this looks like that. Therefore I’ve explained it as opposed to this more, um, inductive school.  </p>



<p>Paul    00:14:04    I kind of get the sense when I, you know, watch you give a talk or read some of your works and I’m probably way off base. Is there a sense at the Santa Fe Institute of almost pride in being an underdog?  </p>



<p>David    00:14:20    I probably, I we’re all a bit childish. So maybe I suspect that if you don’t have pride in being an underdog, then you shouldn’t be assigned  </p>



<p>Paul    00:14:31    Well, what happens when you, when complexity science comes to dominate and gain the respect of all of the sciences that it should have already gained the respect of, and it already has.  </p>



<p>David    00:14:40    Yes. Fair enough. I don’t think it’s complexity science per se, but that’s another issue which is, and John should jump in here. There are personality traits that are correlated with news.  </p>



<p>Paul    00:14:54    I mean, Mavericks is one entire section of one of the recent books of a collection of SFI people.  </p>



<p>David    00:15:01    Yeah, no, that’s, you’re absolutely right. You’re right. And I think that I do believe it’s true that there are personality types that are drawn to novel frameworks and they’re not made uncomfortable by them. There are many other people who are equally good scientists who are satisfied by recapitulating. Uh, what has gone before and perhaps come up with discoveries that have huge depth. I think that there is a personality aspect of science. There’s no point denying it. People were obsessed with Albert Einstein’s hairstyle. Uh, they were obsessed with Richard. Feynman’s brushing his teeth with his own urine. Uh, we shouldn’t deny it. It just shouldn’t be confused with the quality of the work.  </p>



<p>Paul    00:15:46    John, I don’t remember if this is, I don’t know if you told me this last time you were on or if this was offline, but I think that you mentioned that David finally sort of pulled you in and convinced you that, that what you do is complexity science, or you should, I don’t know if he drew you in further and convinced you that what you do is complexity science or, uh, convinced you that you should come more toward the complexity science side of things. Maybe you can untangle that.  </p>



<p>John    00:16:16    Yeah. I mean, I definitely think that there are moments where I find myself in my own work rediscovering for myself things that I could have just gotten a quick update about if I’d spoken to David in the first place. In other words, I think he sort of kindly allows me to think that I’m thinking new thoughts when in fact all that’s happened is that I’m beginning to see the light that has always shone from the top of that hill in Santa Fe. And also I think David, I’m trying to remember now when he talked about what complexity is, it’s really about hierarchical systems, levels of explanation, coarse-graining. In other words, it’s not just about defining complexity. It’s about recognizing that there are multiple disciplines and that there’s something about the structure of knowledge, the ontology of knowledge, that has to be subject to a way of thinking about it.  </p>



<p>John    00:17:11    And complexity science covers in a way, a hassle Chan who I spoke to about before in the philosophy of science, where he talks about going after those things, that scientists in their everyday work or conceptual schemers pass by proxy size, I think is the equivalent of that in so much that it addresses things that people have an inkling about. They give it upside grants, but don’t really want to have to tackle directly. And I think if you become about your subject as a scientist, I think I’ve come to the conclusion. There are two things that you cannot avoid. If you are thinking about science. One is you have to think philosophically and the other one is you’re going to have to deal with complexity and its broadest conception. I don’t think you can escape either of those ways of thinking that go along with the more traditional notions of what a scientist does. Does that make sense?  </p>



<p>David    00:18:20    Yes. I think again, I mean, it’s really important. And Paul asked that question about entropy at the beginning, put serious. When I say the complexity is as broad a church as physics, right? In other words, it shouldn’t be confounded with a method. And I think John’s absolutely right. It’s if we think about adaptive systems carefully, right? From genetics up to economies, which we do, that is what we do. Then there are common principles and moreover you have to be able to defend and explain why it’s defensible to be an economist, right. In other words, w and this we’ll get to it when we discuss emergence. In other words, we think it is not only acceptable, but correct to have multiple levels of description and explanation and understanding they might not align by the way with the current disciplines. And that’s a very interesting fact. I mean, we might have to reconfigure that space. It might be that there’s only two things or their agenda things up, but, but John’s rage. It’s, it’s, it’s an approach to understand this domain of matter, that exhibits purpose at multiple scales and an exploration of the kinds of ideas that work best at each of those scales. And so it’s a very broad church. I think that’s very important. And it’s,  </p>



<p>Paul    00:19:38    Well, it’s basically agnostic with respect to methods, correct?  </p>



<p>David    00:19:43    Well, yes and no, I it’s interesting. I did another interview on this and that. Yes and no. I mean, where we get a little upset, I mean, look, we’re having this conversation now in a period of huge trauma in this country. And, uh, many of my colleagues were involved in writing down mathematical models for epidemics. And I did an interview yesterday where we were talking about what’s missing from those models and those models by the way, are crap. If you’re trying to explain what’s happening to African-Americans and native Americans. So, uh, it does matter what methods you use. And I think a lot of these mathematical formalisms have been so beholden to the fantasy of parsimony that has been inherited from physics that they failed to address the complexity that we, with our eyes open, see. So we’re not agnostic about methods. We think it matters which ones you pick and that they are true to the phenomena under investigation  </p>



<p>Paul    00:20:41    When you get a well-rounded knowledge base in complex systems, because I think it takes a broad spectrum of knowledge to really, I mean, I, I certainly don’t have a good grasp of the entirety of what complexity is because it touches so many different realms. Uh, and it does cover many different methods if it’s not agnostic to methods at at least, uh, has a broad swath of methods that it can employ it’s agnostic regarding which, which is the correct, whether there is a single correct method it’s, uh, purpose is to correct pick the correct method, given the problem, for instance, perhaps putting words in your mouth, but when you get this well-rounded knowledge that is necessary in complex systems, um, does it make, you know, does it sort of transfer it? Does it make, can you, can you hop around between complex systems and understand them using the same approach more easily once you have a broad base in that knowledge? Does that make sense? It does.  </p>



<p>David    00:21:38    Again, I want to say something quite subversive. I mean, I believe I have the sort of same attitude towards complexity as I do to dentistry, right. That I wish that teeth were all healthy enough, that dentists could go away. And I wish that we thought thoughtfully enough about the living world, that complexity science could go away. I don’t care about an area of science in the slightest. I care that we are honest about what we don’t understand and, and the limits of our methods and the limits of our frameworks that actually is what’s important much more than anything else. I, yeah, I, I, there’s too much emphasis, I think on this sort of what the last thing I would like to see happen is complexity become disciplinary. I think that the Institute itself, for example, to the extent that it represents that world has to be constantly mutating into some hopeful monster that can address difficult problems of the future. I’m much more comfortable with the X-Men model of science.  </p>



<p>Paul    00:22:38    I mean, there are common principles to complex systems, uh, and I’m wondering if, so, we’re going to talk about its relation to neuroscience and the brain and the mind in just a moment. And I found myself wondering if there are known principles, um, from the study study of various complex systems that have, you know, transferred from one complex system to another such that you realize in the new complex system that you’re beginning to study, you see where the actual holes are and where some principles from this previous complex system that you are, that is already well-studied where some of those principles are supposed to fill in, in these holes and be able to then predict, uh, what you might empirically find in the new discipline you’re trying to study. That was a big bag of words. So I apologize,  </p>



<p>David    00:23:31    John, do you want to try that on first? And then I can jump in, I,  </p>



<p>John    00:23:34    I don’t think I can be authoritative enough about another area outside of neuroscience to know whether it’s, it will lead to some savings in the way we apply it to neuroscience, because we’ve learned it in others. I have to believe that when it comes to physics, you know, whether it’s, you know, condensed matter physics, solid state physics, and what’s been learned about, you know, hierarchical systems and emergence in physics, I must believe that though, that the physicists could be of huge value to us when it comes to thinking about these things. So Phil Anderson — and he just died — and the way he thought about the disciplines and the way he used examples from, from physics and superconductivity, I find it extremely informative. In other words, effective theories in physics, you know, and how they can be hived off to some degree from their hierarchical position. And those are all hugely valuable ideas that I think neuroscience would benefit from. But I certainly have, so all of that emergence, what is it called? The disciplinary fragmentation that is ontological, that he, you know, Anderson talks about psychology and economics and biology, and he believes are the real disciplines. Right. He has it in his paper, right. So we must learn from the physicists. I would say I, but, uh, maybe I don’t know, David has to say about that, but I,  </p>



<p>David    00:25:02    Well, I, I mean, I could, I wouldn’t give the physicist too much credit, so I feel that they’ll just take it into Hein. Well, look, I mean, I, I’m something a mutant, I surrounded by this. I feel that, um, there’s another way of thinking about it, right? That nonlinear dynamics, right? It’s it’s, you can’t really do any modern science without doing some nonlinear dynamics. You can’t really do modern science without doing information theory. And nowadays it seems that you can’t really do neuroscience without talking about circuits or networks and et cetera. And it goes on and on and on. And of course we’ve been working on dynamics on networks, you know, nonlinear dynamics since the beginning. And it’s, so it, in a way there’s this natural diffusion of more advanced, I’d say methods that partly a restricted to the Institute, quite quite the opposite. And, and you just look at the history of neuroscience.  </p>



<p>David    00:26:05    There is a history that, and John, of course, knows it much better than I do. And you do too for, but certainly if you think about McCulloch and Pitts and Von Neumann and John Hopfield, and Bialek. And most recently people like David Mumford and, and there’s this obvious foment in new techniques and methods that they actually describe as complexity themselves, quite interestingly. Um, so it’s hard to imagine any field evolving if it were not to be open to new formalisms. And I actually think, I mean that we haven’t even started and we might get there. And I think the emergence point that John raised is very important. I don’t think we have a clue how really, to theorize about things like the brain and we’re still in the descriptive phases that that’s, that’s great. Lots of opportunities. Where  </p>



<p>Paul    00:26:55    Do you think we are in complexity?  </p>



<p>David    00:26:57    But again, you know, I think that it’s a brand new area. I mean, think about things like the use of maximum entropy approaches that Jaynes first pioneered in statistical physics, that people like Bialek have used so effectively in looking at spiking, um, that people like John Harte used so effectively in ecology. Look at scaling theory, that’s been so effective in looking at allometry. And so this, this is, you know, a couple of decades. And so you’re right. We’re embryonic.  </p>



<p>John    00:27:32    And does it still does seem like it is a, it is diffusing over, for that example you just gave, from physics into other disciplines.  </p>



<p>David    00:27:39    That’s absolutely, but, but also mathematics and, um, enlarge it. Uh,  </p>



<p>Paul    00:27:48    But yeah, you know, I asked about it about certain principles transferring to new domains so that when you’re exploring a new domain, you might know what to look for, you know, and you just mentioned, um, scaling laws and, you know, you can think of something like scale free distributions and how they are common among all sorts of complex networks, adaptive systems, and, you know, indeed you find them in the brain. And I’m wondering like how many of the, how many of these different principles are there that we get, you know, from a table, go look and think scale free. I should find that at this level, when I look at the, um, you know, mechanical level or something at the spiking neurons, I should find a scale free distribution and aha. I do. And you know, how many of those types of things do transfer across complex systems? I mean, I, I know it’s very,  </p>



<p>David    00:28:33    I’m not, I’m not a lover of that kind of work, to be honest. Um, I’ve used, it was very descriptive and phenomenological. In other words, it’s true that it’s intriguing. Of course there was a huge brouhaha when some of our faculty actually got very involved in small world networks. It was then small world. And now of course it’s, it’s, it’s fat tailed and it should be something else in a few years. And I really, yeah. Right. But I have nothing against that, uh, because it’s, but it’s really just shining a flashlight in an area that we have to think about much more carefully empirically and with much more fine-grained models. And so, um, I don’t like papers that pretend to report an insight, just by doing fancy statistics, which is all you’re talking about, by the way. So, I mean, when people like Per Bak and others got interested in, you know, power law distributions around self-organized criticality, they provided a model, the sandpile model. Now it turns out that sandpile model was wrong, but nevertheless, it wasn’t enough just to describe it. So I think that some of that, what tends to give complexity a bad name, because it’s a bit superficial, it’s where you begin and then you go and do some real experiments and, or generate some real theory.  </p>



<p>John    00:29:53    I mean, somewhat it’s somewhat analogous. You know, you talked about Olaf and, you know, Danny Bassett who, you know, there are a lot of these metrics that can be applied to brain data, but sort of to what David said, they tend to be very atheoretical. And other words, you get a lot of descriptors of the connectivity, but unless you have a question to us, why should it be this versus this it’s a beginning. And, you know, Danny herself has sort of admitted how does one go beyond increasingly sophisticated, descriptive statistics of networks to something that begins to sound like an explanation of something,  </p>



<p>Paul    00:30:38    But you have to open the bag and see what’s in the bag before you can theorize. Yes,  </p>



<p>David    00:30:43    But I just want to, but you know, Danny, like John is one of our professors. And so, and I, a lot of, I know her very well, but I would say that Dan is a great example of someone who did precisely that. I mean, she said, look, there’s some tantalizing statistical evidence. Now let’s go in there and, and do control theory and do some experiments and do good science. I mean, there’s nothing surprising about that. I think if you stop at the level of phenomenology it worst, it’s numerology, it’s just finding patterns that are meaningless because they fall out of some very simple central limit theorem. So, you know, oh, wow. I found all these Gaussians yet. When, of course you have, because we know what happens when you add up random variables. And I think that the, we have to go further and I think a large part of what’s happening in complexity science that is that community of researchers interested in these adaptive networks have been doing just that.  </p>



<p>Paul    00:31:37    I mean, there is just, there’s been a siren call for more theory in neuroscience now for, I don’t know, a decade, I don’t know when the, I don’t know when it really started to gain volume, but, um, uh, the pushback to that would be, yes, of course we need more theory, but we also, you know, we’re, we’re just seeing data. So you could think that it’s jumping the gun and expecting too much, but we do need more, you know, any, anytime I ask anyone that question, it’s always, yes, we need more everything is  </p>



<p>David    00:32:06    We need more thinking,  </p>



<p>Paul    00:32:08    Okay, well, let’s start thinking. So, but I mean, what, what do you mean by that? Because thinking to me means, okay,  </p>



<p>David    00:32:15    Not necessarily, not a chore, in fact, no, I don’t feel that way. Look, I mean, we’ve all been to talks right? Where we are sort of doused in vast quantities of visual data and you’re sort of left drowning on the one hand in all of this information, but with no question being asked, no lucidity and the way that the problem has been framed, and Murray Gell-Mann, who was one of the founders of this Institute, was extraordinarily critical of talks that did either of two things: A, just presented your data as if that was somehow science, and B, just did math. Cause that’s not science either. And I, I do think that there is a kind of complexity to the scientific enterprise that you have to be prepared to spend time to think deeply about difficult problems and not look at the teacher for a bitch and you know, or have data and theorize it sort of laziness, I think. And it goes into this whole sort of machine based science that you just take the human out of the loop. And I think just thinking carefully and collaboratively for a long time, without a paper in mind is a very good idea.  </p>



<p>John    00:33:32    It’s a trade off. There is a trade-off. In other words, you know, there’s a wonderful book by James Bridle called New Dark Age, where he gives a very bleak description of the world that we’re currently in, but he gives beautiful examples of how utterly useless surveillance is. Right? So now there are all these cameras and all this data, right. And it doesn’t work, it doesn’t prevent anything. Right. So in other words, there seems to be a trade off between collecting data versus actually slowing down and having a think. Right. And so yes, you can say, look, send out all your hounds have pluralism in the way that you do science. Okay. But don’t say that when really it’s an excuse to never really give a critical subversive talk ever. And, and I think, I remember I’m trying to remember it was at a Gordon conference, right? The last one, and I gave a talk.  </p>



<p>John    00:34:35    I think it was, I can’t remember someone said to me, you know, John, that’s a very different kind of talk to what I’m used to hearing is actually very interesting because I don’t think that way anymore because I feel well, that’s the way I give talks. And I think that’s the way they should be given, but it was extremely interesting. You said, you know, usually people show their data, right. And it showed data, but he says that, but he wasn’t saying that I didn’t show data, but he said, that’s it. And it’s very difficult to grasp the context what’s at stake. Why does this matter? How does it relate more broadly it’s as though none of that matters now of course you could always say that they could, if they wanted to, they could give all that context. And, but actually I’m not so sure. Right. Because synthesis is not something that is in any way taught or promoted. And so of course you can get into sort of discussions about what counts as thinking, but you kind of know it when you see it and you detect more often than not that it seems to be absent.  </p>



<p>Paul    00:35:43    Not everyone has the skill to synthesize. I think that it is a master skill that yes is under developed in at least in me and, and across the entire population, uh, broadly. But I think that it is one of the more important, and maybe underappreciated is maybe what you’re saying skills. Cause it’s hard.  </p>



<p>John    00:36:05    It’s also about, you know, I was reading, um, Oliver Sacks’s essay on Darwin when he came back and his botanical experiments on orchids. Right. It’s an incredible essay. And what, what that essay exudes is the insatiable curiosity machine that Darwin was. Right. And then it’s just like, he was a scientist out of every pore, right. He just getting down onto the lawn to sort of look at the orchids. And I mean, it’s, it’s just this kind of question asking curiosity and experiment in one’s head as well as literally. And just that essay by Sacks on Darwin had more science in it than I’ve experienced at most talks. Now I didn’t know what that thing is necessarily, but you want that back,  </p>



<p>Paul    00:36:59    But so there’s a disciplined patience that comes along with that at least with Darwin and perhaps sax. I mean, is that part of the special mix?  </p>



<p>David    00:37:08    Yeah, it’s an interesting question. I don’t want to keep us away from some of these deepest scientific questions, but I don’t think we can avoid recognizing that the industrialization of science, the sort of the penetration of thought by economic considerations and the obsession with citations and h-indices, you can’t imagine that that doesn’t compromise the quality of the enterprise. Right. And I think that what John is describing, I don’t think Oliver Sacks gave a shit about, uh, a citation. I don’t think he would even know the word, he probably thought it was an aircraft or something. So I think that the, and I think that’s important for everyone to bear in mind that it’s a complexity problem, right? Culture, the economy bears on how we reason and the way in which we produce science. And it would be nice and perhaps a bit idealistic if we could return to communities that were slightly less obsessed with the weight of paper and more interested in the quality of the concepts.  </p>



<p>Paul    00:38:17    Well, that’s what SFI is fundamentally  </p>



<p>David    00:38:19    As well. What has to be, I mean, it fails very often, but it wants to be. And certainly from my point of view, supporting an Institute like this, I’m absolutely committed to that and we’d go to the cross for that. But, um, but on the other hand, we live in this world that has these perverted.  </p>



<p>John    00:38:36    We had, we had a meeting at SFI where these two worlds collided pool last year, where I very much felt like it was like the fable of the store and the Fox where they invite each other over and then it’s impossible to eat the other’s food because it’s the wrong utensil. Right. And I felt that was a meeting held last year on the brain, actually at SFI where these two kind of ways of trying to talk about a subject went up against each other. And I’m not, I’m not actually trying to be, you know, uh, sort of bitchy for the sake of it. It was, it was really quite stark to see the discomfort in two very different ways of talking about the same subject, you know, wanting to be more broad and more abstract, maybe a little bit more formal, trying to sort of look across different areas versus let’s stick to the data. Let’s know what we know. I mean, it was very stark now again, one, shouldn’t say that there’s one type of science only, but neuroscience I think would benefit greatly from relaxing a bit and going for a walk and thinking things through, across disciplines rather than in this mad rush towards publication and data collection and substituting, whatever science is, which is another hard thing to define with all its ancillary subjects, whether it’s statistics or do you see it it’s if they were trying to do everything other than the science itself,  </p>



<p>Paul    00:40:07    I mean, that’s an institutional problem. There’s a lot of pressure on people to publish. Do you guys know your H index?  </p>



<p>David    00:40:12    Yeah. People remind me and it’s disgracefully bad. I think mine people who are being mean to be telling me mine.  </p>



<p>John    00:40:20    Yeah, no, no. I, I, I do not want to know and I don’t want to talk about  </p>



<p>Paul    00:40:28    Yeah. Not to celebrate naivete, but I don’t think, I, I didn’t know what an H index was until I was, I think it was a post-doc maybe. And then it’s appalling when someone tells you  </p>



<p>David    00:40:38    Yeah. It turns out it’s just the logarithm of your citation factor. So it’s kind of a it’s, it’s hilarious. So a lot of fascinating  </p>



<p>Paul    00:40:45    And nothing, but I’ve had colleagues just sort of stay on the H index page is my H index up today. So, you know, what do you mean? It’s, it’s a career as well. You know, it is a career so, well, we can put these, let’s put these aside and talk about brains and minds. How about  </p>



<p>David    00:41:01    I, I do, I do want to talk about, and I guess it will be in, in relation to brains, this issue of emergence. I think it’s,  </p>



<p>Paul    00:41:09    Let’s, let’s just start off with it then. So you, you guys have mentioned emergence a few times now, and so this is pressing on your mind. What is it about emergence that is  </p>



<p>David    00:41:18    Talk about it generally, um, more formally perhaps, and then John will explain why to him, perhaps for me to why it’s important in brain and mind, emergence is another one of those words that generates a huge amount of confusion needlessly. And so let’s just make it very clear for everybody. So here it is, and that I accept that there are many definitions. I’m not going to define it. I’m going to talk about its operational value in relation to what we’re going to talk about. And that is that there are coarse-grained theories that are statistically and dynamically sufficient and understand what that means. It means that there are aggregations of variables, which are principled, typically averages of some kind, which are better predictors of their future selves or as good. I should say that really qualified as good as any additional microscopic information would be in statistics.  </p>



<p>David    00:42:15    That’s called sufficiency; in dynamical systems, dynamical sufficiency. So in other words, you don’t get any additional predictive benefit at all by including more microscopic data. And the question then is when is that true? And when is it false? And I just want to give a very simple example water. If you want to understand the laminar flow of water, you don’t need to go to the microscopic constituents. You just have Newton’s second law, F equals MA, applied to fluid in the Navier-Stokes equation. And it deals with macroscopic observations and measurements, density, pressure viscosity. Okay. If you want to understand the boiling point of water, that theory is useless. And then you have to do the theory of phase transition. So Ginzburg-Landau theory, and that’s all expressed in terms of microscopic Hamiltonians, right. Energies of microscopic interactions. So according to what you care about, you use either the effective theory, the average theory, Navier Stokes that is not improved at all by including the microscopic, or you need the microscopic to explain the property of boiling of water.  </p>



<p>David    00:43:25    And the reason it really matters is because the macroscopic theory has huge advantages. First of all, it’s computable. So it’s completely positivistic this remark. You can’t do it with any computer, the size of the universe, if you wanted to include all the microscopic detail. So that’s just practical. Learnability when it comes to the brain, right? In other words, there are so many free parameters in the microscopic description that you’d never learn them, right? So there’s a learnability constraint, which is analogous to the computability constraint. And the most interesting one is the observability point, which is you wouldn’t know what macroscopic property, you need a microscopic description to describe in other words, unless you had it first. And so that’s a much more difficult one. It’s a very top down concept that you can’t get to the macroscopic from the microscopic, you have to have an observable prior. So those are very practical reasons why it matters, uh, above and beyond the concepts of sufficiency. So I just want you to put that in the background,  </p>



<p>Paul    00:44:28    Is this all fundamentally due to the fact that, I mean, you don’t have, if you did have a computer that you would need a computer, the size of the universe, uh, cause otherwise, I mean, you have to simulate things at the microscopic level to eventually actually understand them and you physically practically cannot do that.  </p>



<p>David    00:44:47    We’re actually on two and it would be absurd. I mean, just the example I like to give is mathematics, right? So mathematicians prove theorems, just go to a math journal, look at the great mathematicians, Poincaré, you know, uh, Grothendieck. You know, you pick your favorite. Nowhere will you find any reference to psychological states of mind firing patterns of neurons, dopamine receptors, electrons, neutrons, or quarks, it’s not considered important and it’s not right because at the level of a mathematical proof, mathematics is sufficient furthermore, right? It would be incomputable and non computable from the point of view of the atomic structure, you wouldn’t know what to observe and it would be fundamentally non-learnable as a discipline. So it’s, it’s, it’s moot to me. Uh, emergence is a fact. And the question then is whether you’re dealing with the laminar flow of water or the boiling point of water. And I think that’s really the interesting question when you do have to go down a level and, and for many of the systems that we study, I’m not sure we know.  </p>



<p>John    00:45:59    Right. Exactly. I think what people get confused about is what you were sort of hinting at Paul is when is it just a sort of epistemological limitation or when is it ontologically true? Now I would say, and David can correct me if I’m wrong, is mathematics and proofs in mathematics are ontologically independent of those other things about the world, knowing about atomic structure is simply not relevant to mathematical truths. It’s not that they, if you have the computer all the time that they would add in any way they don’t. Okay. So I think the question is when you talk again about Phil Anderson’s disciplines, are they ontologically true? In other words, does the explanatory structure of the universe form ontological classes or are these just our cultural and epistemological failings that sort of splay out the way they do, but there are only a few true ontological objects, presumably down the level of physics, right.  </p>



<p>John    00:47:10    And everything else is just derived. And if we have a computer, the size of the universe, we could do away with all the disciplines. Okay. Now, in a way I don’t even care if one has to decide when it’s ontologically true or epistemologically true because you want actually get some work done. And so, you know, I think there’s a philosopher Stevens who gives a great discussion using evolution to talk about the independence when he talks about, if I remember correctly, a wonderful description of the shape of the marsupial mole and the golden mole. And these are two moles with completely different evolutionary histories. And yet they have they’re blind. They have snouts, they have thick fur, and they have claws. They they’ve converged evolutionarily on the same solution to digging through the earth. And that’s the explanation for their body shape. It’s adaptive to the environment they live in. Now, it would be very odd if you were to say, I need to explain these two moles by going into that developmental history or not, or how did, where did they come from? In fact, you’d be detracting from the actual explanation going into details, which will be different because they have completely different stories evolutionarily. Doesn’t it depend  </p>



<p>David    00:48:40    On what  </p>



<p>John    00:48:40    Satisfies you as you want to know why they have the shape that they have. And that, and the question you’re asking is that contrastive question, right? It would be very odd to say that that explanation at that level of contrast, why do they have the same body shape you see? And I think that’s the, I think to go down lower, it wouldn’t add very much other than that, unless you want to just say, why can’t they both have a worrying rotor instead. Okay. But that’s a different question. Why can’t they have a worrying screw? Yeah. That’s about constraints. And what’s available. That to me sounds a little bit like the boiling point of water. Why couldn’t they have some other structure to drill through the earth, but if you want to know why they share that body shape, given biological constraints, that’s enough, it’s adapted to going through the earth.  </p>



<p>David    00:49:32    Well, you know, it’s, I do think it’s interesting. Every question is susceptible to both forms of inquiry. And if John were to look at those moles, he’d find that under the surface, they both had pentadactyl limbs as do dolphins and whales. Right. And so if that was the thing you cared about, this surprising homology that isn’t explained by selection but is explained by common descent. And so I think it’s always going back and forth. I think I suspect, I don’t know John, if my criticism is that there is this belief and I’m not sure quite where it comes from that the most fundamental, the truest description is the most microscopic the most reduction.  </p>



<p>John    00:50:18    Yeah. I mean the other example, that’s given a lot by philosophers. I don’t know we spoke about this before. Paul is about causal contrast, which is there, is that what, you know, the one that’s given, I think Carl Craver gave it first is, you know, why did Socrates die? Right. And you know, somebody might say, well, Socrates died because he was condemned to death by the Athenian authorities for corrupting the youth. Okay. Somebody else will say he died because he chose to drink the hemlock rather than to go into exile.  </p>



<p>Paul    00:50:48    Well, I thought, I thought it was a Caesar analogy. He died by a metal spike in his chest versus the Senate.  </p>



<p>John    00:50:55    So then you can say, well it’s because, you know, he drank hemlock rather than English breakfast tea. And then you can say, well, hemlock operates on a certain part of your body? Now, the point is, is that all ontologically equal in terms of efficacy as explanations, but neuroscientists to the point that they would make will think that if you can work out the mechanism by which the hemlock makes you stop breathing, that that’s the best explanation for the death of Socrates. And it just isn’t right. And I think that’s the point is that there’s this strange belief in neuroscience that there’s a foundational, privileged causal contrast, and it, everything else will ultimately devolve to that causal contrast. And that’s the odd thing, whereas physicists absolutely fine having Navier Stokes equations for fluid dynamics versus having phase transitions and see that those are just different regimes of explanation.  </p>



<p>Paul    00:51:58    Isn’t ontology fundamentally out of our reach though.  </p>



<p>David    00:52:01    I don’t think so. I don’t think so. I, um, you can be Kantian. No, I know where you’re going, but here’s the next nice thing, right? Which is that it has to do with degeneracy that whilst it’s true, that our representation of the world might not be identical. I mean, I should explain what that means by the way. So I just have to formalize this, because John and I argue about this all the time. I want to make sure I’m being clear. Say there’s some structure X, we would call Y a representation of X if Y is the image of X under some structure preserving map. Okay. And so think about, you know, retinotopic maps or motor and sensory homunculi, they’re all Ys, right? And they lose information in X, but they preserve some structural feature of X, some geometric, some topological.  </p>



<p>David    00:52:55    Now, if that were not true, Paul, selection would be ruthless to us and remove us from the world. So it doesn’t mean that we are identical, Y and X are not identical by any means, but Y has to maintain something, which is absolutely true about X. That’s the sense in which I don’t, I do believe it’s possible for ontological unity, not, not disciplinary unity. I am with John, but I don’t like this idea that somehow, because we can’t know exactly the world other than mediated through our senses and our instruments, that means that we know nothing about the world. That’s just false.  </p>



<p>John    00:53:31    That’s a very important point that there must be some similarity transform that as possible, because if there weren’t, I mean, that’s, what’s so nice. I think about strengthens is mole example is it’s what evolution is working on. It’s almost an example that evolution has given to us, right? That it’s actually converged on this body shape to burrow through the earth. Right. It’s sort of demonstrating to us that there is something ontological that it’s operating on because otherwise there would be no survival. So in other words, we have to believe that a mapping is that what David I think is saying is there has to be some mapping, right? Of course it has to be.  </p>



<p>Paul    00:54:16    So this is, so this kind of comes around and we’re going to, of course, which is totally fine with me. Um, but this kind of comes around to David Deutsch’s. To me, it kinda comes around to David Deutsch’s conception of what explanation is. Um, I don’t know if you guys are familiar, but it’s along the lines. That explanation is, um, just the latest thing that’s hardest to vary and a better explanation is harder to vary as the explanation and, uh, between the epistemology and the ontology that we’re talking about. David, you’re saying that there’s some must, there must be some truth. If there is a mapping that adheres over time between X and Y. Uh, but, and I might be misunderstanding what ontology is, but then I would say that we only have epistemic access, uh, to that and not ontological access.  </p>



<p>David    00:55:04    No, I understand. But again, this is the critical point about this sort of structure preservation; mathematicians call these homomorphisms. And the critical point is it’s, I’m much more optimistic in some sense, because once it’s true, what you say by definition, we are instruments that make, make measurements, it’s physiology. Everything is right. Um, but, uh, as John pointed out, the, the behavior that ensues and those measurements has implications in that same real world, right. Which doesn’t really care about our epistemology, but it does care whether we can fly or swim. And so that maintains an ontological through-line and, um, it doesn’t matter to me whether or not we achieve identity. It’s just that we achieve some kind of structural correspondence  </p>



<p>John    00:55:58    And that’s well, you see, it’s very important. It’s deeper than that, Paul, is it it’s you have to decide, I think you had a really wonderful person on your show. I’m difficult to pronounce her name.  </p>



<p>John    00:56:11    Mazviita Chirimuuta. Yeah. She was very interesting on this point where she said, do not confuse the, the, the goals of science, of trying to seek the truth versus trying to seek understanding they’re not the same. Okay. Right. So I think she, you know, she was right about that. Um, it gets a little bit, which we’ll get to later about a piece David wrote recently is that there’s probably a sort of veracity-understanding trade-off and wouldn’t it be interesting if getting models that are sort of difficult to understand but fit the world better is a different discipline to science, which does have to have understanding in it, in my view to be called science, but there may be another discipline that may be closer. It’d be a better fit to the world, but will be opaque to us. And so if science is going to have anything to do with what, who, the people we’ve been discussing so far, then I think understanding must feature in it. Otherwise I think I’ll just give it another name.  </p>



<p>Paul    00:57:28    Well, this is an issue is I think that, so, so the philosophy of science or understanding in the philosophy of science has sort of exploded recently. And, um, actually John, you turned me on to Hasok Chang’s, Chang’s work, who turned me on to, uh, um, de Regt,  </p>



<p>John    00:57:43    Hey, actually I turned you onto Henk de Regt as well. Actually. That’s fine. I’m very glad. And actually he was mentioned in that podcast too. Um, yeah.  </p>



<p>Paul    00:57:57    Yeah. So she, so there’s like four other, you know, recent books on the different natures of understanding and our conception of them. And you guys couldn’t have heard this, but I just had, uh, Jim DiCarlo on the show who has sort of headed up this modeling the ventral visual stream, um, in feed forward deep networks and now recurrent deep networks as this hierarchical system. And it models on the brains really well and predicts brain activity very well. And now he’s controlling, uh, brain activity using the models to generate images, to control neural activity in the brain, by presenting the stimulus to the subject and his conception of control, which was fun because he’s pretty excited about this. He sees control and prediction as the same thing, which is understanding that.  </p>



<p>John    00:58:43    Yeah. I mean, I know, I know Jim, and he’s fantastic and, but that’s a complete and utter cop-out. I mean, he’s basically, um, decided that if you can do control the prediction, that will be the new understanding to kind sure. I mean, if you want to give this new name, but it’s much more interesting to me about what de Regt and others say, and, you know, in line with what Feynman and many others have said is that you need to have an intelligible theory to build explanatory models of the phenomenon. And then you should be able to do intuitive work with that intelligible theory to generate explanatory models, to explain the physical phenomenon. And if you don’t do that, right. And you know, it’s the Dirac idea that we discussed last time I was on which you should be able to see the implications of the equations without having to go through the full derivation.  </p>



<p>John    00:59:38    You can do intuitive work. Okay. And science is this ability. And maybe it’s your own way of developing an effective theory that you could work with to generalize, to do new experiments to that science. Right. If you don’t have that, then I don’t know how it proceeds, other than saying, well, well, let’s just stop doing that kind of science. And let’s just do, let’s just do deep neural nets and be model free. Right. But that’s not denying that we’re losing one thing over the other. And that there’s a trade-off because there is a trade-off.  </p>



<p>David    01:00:19    Yeah. I don’t, I mean, I have a lot to say about this, Paul. Sure. Take your time. Well, I agree with John, of course. I mean, whatever the other chap said was just ridiculous. Um, and if he wants to just make those two words have the same definition, the dictionary he can, but I rather have a dictionary of more than one word in it. So let’s just look at prediction. So let’s just make it again, make it simple. I find these things useful. So prediction is very simple. It’s just getting an input output relation correct. Out of sample. That’s the prediction. So if the input is time and space, you tell me temperature, that’s called weather prediction and you know, it doesn’t matter. That’s what prediction is. It’s an IO relation that works now knowledge, of course, is the facts that go into making the IO work.  </p>



<p>David    01:01:09    But knowledge also goes into understanding. And I want to talk about understanding, and I know John is interested in this. I cite some work by John actually on this. So for me, understanding is the communication of why the IO works or the construction of the IO in the first place. So communication and construction. And let’s just make this quick, quick as everyone who’s listening to. This knows this already. If you are taking an exam and you copy someone else’s answer who, you know, always gets A’s right, that predicts success for you. You understood nothing. You used a simple rule and it worked okay. What most good teachers ask is once you’ve produced a result, whether it’s right or wrong, I say, how did you get there? Exactly. Why did that work? Why does summing up rectangles in the limit give you integration. Anyone can use an integration formula, but not everyone can explain why they work.  </p>



<p>David    01:02:05    That’s the difference between a good mathematician and a crappy mathematician, quite frankly, or a good scientist and a crappy scientist, a good teacher and a crappy teacher, a good student and a crappy student. We know that. So there is much more than prediction, right? There’s much more than knowledge and understanding is tricky. And philosophers have commented on this. And of course, John Searle most famously in his thought experiment of the, of the Chinese dictionaries in the room. And that’s been of great interest to me. And there’s a lot to be said about this. I think John Searle gets a lot wrong. Um, and I do want to mention something John has done in my thinking on this topic. So again, understanding goes beyond the IO map. It goes to the explanation for why it works or how you construct when they’re FX.  </p>



<p>John    01:02:49    And just one thing on that before David, you know, I think it’s not to knock, obviously people like Jim to call on all the people who, who, who build these impenetrable, you know, IO networks, right? And their understanding finds other places to land. You know, how, how do you build a cost function? How do you play with the architecture? You know, what is the learning rule? There are other places where people show great understanding, but the thing itself, can I explain the performance of this system? Um, they admit that they can’t now that’s fine, but don’t buy because that’s true. Say I’m no longer going to consider that a problem that I can’t understand that piece. Okay. You just, just live with it now. We’ll, we’ll get, we’ll get on a little bit longer to what I think is a solution to that problem, because I’m not completely satisfied that you should just go when it comes to deep neural nets, it’s over parameterize. We’ll never understand why it’s this way versus that way, in terms of the connection strengths. And therefore let’s just fall back on having known what the cost function is, having known, what the learning rule is and just accept that it asks them tote. It learns it  </p>



<p>Paul    01:04:05    Well. So that what you just said was put forward as the beginnings. Uh, we, we won’t, we don’t can’t understand things yet. So let’s start with learning algorithms and objectives.  </p>



<p>David    01:04:15    We have to be careful that that that’s not what, I don’t know, your previous speaker, but, um, that’s not what you communicated. He said that. No, no, no. So this is a better prediction is understanding, which is patently false. I do want to give a bit of a history though, because we’ve not, we’ve been here before. Right. And we were here at the origins of science. And the example I like to give of this is Francis Bacon in the Novum Organum. And he makes this beautiful remark. He says, it’s very difficult for humans to draw straight lines or circles. So we use rulers and we use compasses. Okay. So we use tools and in some sense, they subvert our abilities and deep learning is just, you know, compass prime. Okay. Now, interestingly, that the tool that Newton developed with light and this was the calculus, well, they didn’t really actually, that’s a kind of falsity, but the fundamental theorem of calculus, the relationship between differentiation and integration and the application to orbits.  </p>



<p>David    01:05:12    And it’s quite interesting if you read the Principia, which I have not, um, but read reviews of it, uh, or summaries of it. It’s quite interesting that the method of Fluxions, which was, um, Newton’s name for the first derivative of space with respect to time and break for velocity, um, was not in it. He actually, when he wrote it, he, it was too arcane and he was too paranoid to actually disclose his discoveries. He actually presents his results geometrically because they were better understood than his new methods. So Newton felt that just predicting using his methods was not sufficient. He wanted you to understand why, with his theory of gravity, he could reproduce Kepler’s three laws and you did it geometrically. Now, when he went on to discuss the inverse square law, Huygens hated it, because it was non mechanistic, it wasn’t understandable, positing action at a distance.  </p>



<p>David    01:06:09    And then Newton turned around and said, hypotheses non fingo. You know, it’s good enough. It predicts really well. Um, why should I have to do that? Descartes hated it and came up with his theory of vortices, which was much more mechanistic, which didn’t work. So right there in the early days of the revolution was a theory that was predictive. The author chose to present a method presented in terms of a method that people could understand geometrically, more familiar with that. Um, but was criticized for being insufficiently mechanistic and only predictive by people like Huygens. And then there was a suggested alternative. So that then continued into its limit with quantum mechanics and what we now know as the Copenhagen school, where people like Bohr and Pauli and Heisenberg disavowed any intuitive understanding of the physical world, they hated it and replaced it with purely predictive mathematics and, and the famous expression of that position was then,  </p>



<p>David    01:07:12    David Mermin, when he said shut up and calculate don’t even bother right now. So physics has this tradition. It’s no different from what’s going on now in neuroscience and machine learning, and it generated endless discussion back and forth. And now, as of today, there’s a return to fundamentals of quantum mechanics, where people are trying to provide, as John said, an understanding for why these methods work, Murray Gell-Mann, Michael, your kid, Jim Hartle at the Santa Barbara couldn’t stand this. They want you to provide a comprehensible theory as Einstein had and Schrodinger had. So this seems to be a quite universal feature of the scientific enterprise, that there are those who tend to favor predictive efficacy, even if it forfeits intuition and those who don’t like that and feel that the humanistic aspect of science, the creative aspect of science requires understanding, are they both necessary for progress?  </p>



<p>David    01:08:05    I think they are. I think it’s, I think they are. And I think what, if anything, John might be saying, and certainly I would be saying is that the, the extraordinary power of a predictive frameworks in the face of complex phenomena that have very high dimension, we could talk about that, uh, makes this much more complicated to argue, right. In other words, um, there’s something about a complex phenomenon that is so much easier to predict than understand that, that side of the equation, if you like gets differentially weighted. And, and that’s a bit of a problem, but only through only via, uh, simulation though, right? No. And I, in other words, you know, if the nice thing about fundamental physics, right, is that you could have your cake and eat it, right? So you could say, I’m going to predict, you know, with this theory to a hundred decimal places, and I can write down on one page the equations, which generate that solution, right. Which human brains can, can parse, but when it comes to, uh, predicting market trading or the spiking of that particular neuron, unfortunately it looks as if the representation of the structure, this why thing is a very, very high rank. Right. And that has lent itself in the short term to the utilitarian school, which says either prediction is understanding, which is kind of silly, or we don’t care about understanding because you can never accomplish it.  </p>



<p>John    01:09:37    Yeah. I mean, I think that’s absolutely true. I mean, it goes back to the question before, you know, this is exactly, you know, I was looking into this quite a lot and de Regt actually has a whole chapter, chapter seven of his book, on exactly what Dave was talking about with quantum mechanics, you know, the sort of Schrodinger equations and the wave function, being something that people could do intuitive work and picture with versus Heisenberg’s matrix mechanics being just churning out the numbers. Right. So it’s absolutely true that we’ve been here before. And again, it speaks to the parochialism of neuroscientists that they just don’t sort of look out their own discipline for the most part. Um, but I do think it’s true that one criticism that has occurred is some people would have said, well, the reason why we’ve had understandable neuroscience for quite a while is that we made such simple experiments that it was the simplicity of our experiments, that sort of reigned in the multidimensionality that David’s talking about.  </p>



<p>John    01:10:36    And as soon as you started going into the real world and with more naturalistic experiments, it got hopelessly complicated, right. It wasn’t just one bar in a dark room, it was complex images, movies, things like that. Okay. And you know, you know, you had Uri Hasson on, right. Who very much made the point that it’s just not right to consider the human brain trying to overtly model the world. It just has so many neurons. It has so many parameters. It can just do direct fit. Okay. And that where it’s, again, it gets to the epistemological error of doing simple experiments that require representations, that lead to understanding when in fact you have this ability to fit the world and you fit so much in the course of your life, that everything is interpolation. Okay. You only really need what David was saying, sort of out of sample, if your training set was so small, that the probability of you having discovered what you’re going to discover in the rest of your life is not present in the original training set.  </p>



<p>John    01:11:42    Okay. So there is a sense, not just that there’s a, there’s an option, but maybe it’s true that for the most part, because there’s no over representation, we shouldn’t come up with an intuitive understanding and just accept that you’ll fit in. And I think he gave the example evolution, right. That it mostly optimizes, but it’s not going to predict what the next species is going to look like. Right. It’s not like you’re going to predict what the next creatures morphology will be within constraints. So let’s just do interpolation with vast sampling and loads of parameters. Okay. And, but interestingly enough, he, he, he, he fell short of cognition, right. He admits that cognition is not understood that way. And, and, you know, and it’s very interesting that, um,  </p>



<p>Speaker 2    01:12:35    You mean how, how that maps  </p>



<p>John    01:12:37    On to mind. It doesn’t work for mind. He admitted that. And Geoffrey Hinton actually says that the last thing, the last thing that will yield when it comes to AI is cognition. Okay. So you’ve got this interesting thing that evolution of neural networks. And if you believe in, you know, cognition getting more complex as you move towards primate of a kind, both of them don’t yield to what’s happened so far in neuroscience or in an AI. And the reason I bring this up is in the meantime, we may need intuitive explanations of these phenomena to work with, even if those things are true. Right. So one of the biggest things that I would love to prove is David, you know, when it comes to all these things, hierarchy, emergency complexity, aggregation is I’ve always wondered whether there will always be some intuitive, understandable form of the question at some level, of course, greening, even though at some level it’s at the predictive level of the system, you won’t understand it.  </p>



<p>John    01:13:48    So in other words, you just accept, but there’ll be another way to talk about the phenomenon that will yield to more intuitive explanations. Okay. So you can have your matrix mechanics, but there’ll always be a wave mechanics way of talking about it as well. And maybe that’s just hopeful thinking on my part, or maybe it’s kind of true that hierarchical, complex systems that are Mitt details as you go up, have two secrets about them. One is that predictable and hard to understand. And there’s another form that can be intuitively talked about. Is there some principle there that you can have two flavors in any complex system?  </p>



<p>David    01:14:27    Yeah. And I, yes. I want to get back to CRNA a second. Cause I realized that I didn’t complete that or a Boris, but to John’s point, um, I mean I’ve always felt that there are two paradigms that are emerging now, right? There’s the fine-grained paradigms of prediction that have these very high dimensional phenomena that are somewhat incompressible. Right. And so they don’t lend themselves to these intuitive frameworks, but then there are the coarse-grained paradigms of understanding. Right. And a good example of that just to bring it to neural networks. I think John mentioned, this is Doug. We know how reinforcement learning works. Alpha zero has a very, very simple learning rule, right. And that’s the level at which understanding tends to operate in that case, which is we have as a community in DeepMind and their colleagues come to understand that there’s a very simple kind of learning rule that can train these very large elaborate structures. And however that the elaborate structure once consolidated is opaque. And, and I think it’s true. I think that understanding of complex systems might be a little bit like the learning bit and the prediction is done by the structure. That’s,  </p>



<p>John    01:15:39    I mean, that’s, that’s very much the Lillicrap and Kording position, right. That they were, they said, look, you can write on, you know, on half a page, the reinforcement learning rule. Right. And the objective function. Right. And maybe that’s, I actually think I don’t actually fully agree with that is I actually think, um, there are psychological terms, um, that can be used to explain phenomena that neurally are too complicated.  </p>



<p>David    01:16:10    I agree with that too. So I started to make a point. Uh, so I, I think that’s right. I think the theory of scale that my colleagues here have been developing, Geoffrey West and others, are a good example of these coarse-grained frameworks that also do extraordinary prediction, coarse-grained prediction. And what John is suggesting I think is which I think is correct. And demonstrably, correct in some domains, is that not only is this, there is this kind of theory, like the learning theory, which is a theory that can be understood to train a network, but there might even be a coarse-grained theory of how the network works. And, and that, that, that’s interesting. And I there’s been a lot of it as you know, I mean,  </p>



<p>John    01:16:50    And I believe that, I mean, I think that’s what I think, I think that cognitive neuroscience and psychology is, you know, the way, you know, William James wrote his entire book, right. That defined our disciplines, it gets back to what we discussed at the beginning. Are these disciplines ontological or are they epistemological. And I’m just saying that, wouldn’t it be interesting if psychological terms just happened to be the correct coarse-graining to discuss the performance of systems that at the neural level are opaque. Would that be more interesting than if they weren’t, I’m saying that it doesn’t surprise me, that there are, as David said, you know, averaged out objects that you can think with, and I’ll give you an example just to be very neuroscience. You know, when you look at, um, what Mark Churchland and Krishna Shenoy have done, where they’ve looked at motor planning and motor execution as a trajectory through a state space.  </p>



<p>John    01:17:51    Right. So they’re basically, it’s very interesting, you know, David’s talked about the fact that neural networks and dynamical systems, you know, that they have an intimate relationship to each other. There are dynamical neural networks, right. But it’s interesting that when you look at the work, that’s now very in sort of in fashion right now in science, where you take millions of neurons and you do dimensionality reduction, and then you look at a trajectory through a sub space, the dynamical systems approach. What’s interesting, but you know, but it’s very much about what the Schrodinger Heisenberg thing is that or Feynman diagrams, right? Feynman diagrams were another visualization tool that allowed people to have intuitions. Okay. So if you talk to Mark Churchland, I remember him saying to me as an aside, you know, I’ve started to look at so many of these trajectories through these, you know, state spaces.  </p>



<p>John    01:18:44    You can see them, he can see that he actually thinks with them. And so it’s, it’s very interesting that these are derived objects from neural data thousands, if not millions of neurons. But if you asked him, do I need to know the connectivity of each of these neurons? Of course not. And in fact it has to be not the case because in any given animal, you’re going to see very similar trajectories, but they don’t have one-to-one correspondence of, of their neurons. So the invariants happens at a level above those details. And you can think in that at the level of that invariance  </p>



<p>David    01:19:22    Yeah. I’ll give you a good, I’ll give you a good example of that actually from a totally parallel domain and that’s computer programming and the great genius of computer programming is Donald Knuth who wrote about computer programming languages and many, many other things, and makes the point that computer programming languages are the interface between the machine and human understanding. Human reason. They’re not a tool for programming. You can use machine code for that, right? You could write in assembler, no one writes in assembler, everyone writes in Fortran or C++, or Python, or Go, or whatever your favorite language is. And there’s no debate in the computer science community about the value of using high level programming languages, no one, there’s no kind of machine code zealot who says that the only way to really understand it is to write in ones and zeros you’d be a lunatic. And so I think it’s the same thing. I’m sure. Well, there probably is, but I think that’s the same thing is that, you know, our high level, psychological cognitive, literary mythological abstractions stand in relation to reality as high level programming languages stand in relation to circuits, they allow us to not only understand them better, but program them better to control them better. It’s it’s I don’t see why that’s even controversial.  </p>



<p>John    01:20:45    I mean, just to that point, I think sometimes this is the heart of the matter is it’s not that there’s, um, hierarchical organization of a computer or nervous system, just so that we can understand it better or program it better. It’s actually for itself to control itself better. In other words, the nervous system has its hierarchical structure because that’s the way you have to organize a complex system. In other words, you know, the nature of the commands that go out of motor cortex versus premotor cortex are very different in terms of their details, the type of detail required to the level of the spinal cord in terms of that, you know, temporal and spacial dynamics, isn’t present higher up the hierarchy, you omit details. So that control, as David said is easier.  </p>



<p>David    01:21:42    Yeah. We, we, we called this system self observation. And so it’s absolutely right, because any adaptive system, that’s what makes them interesting by the way, is that theorizing about themselves, right? And so if, if you write software, that’s interoperable with other software. So, um, you know, I do everything in Emacs and I’m constantly adding Emacs extensions and, you know, they’re written in enlists, you know, and, and they can control this, you know, oh, S text editor hybrid, um, that’s the right level at which to operate. And so John’s absolutely right. That’s, that’s the great ingenuity of hierarchy, right? That once you operate at that level, you can control at that level.  </p>



<p>Paul    01:22:22    So bringing this back to, um, we can bring it back to any level, but I’m thinking specifically of the, um, thinking in dynamical state space trajectories like John, you mentioned, you know, March Churchland, mark Churchland told you he does, is that understanding? I mean, so, so there’s, this question is, you know, can you use your way, uh, when you’re using a tool like neural networks, like, you know, dynamical systems theory, can you use your way to understanding without just by practice, you know, and, and, and making it implicit in your conceptions.  </p>



<p>John    01:22:57    Yeah. So as you know, de Regt and others have made much talked about that. If you’re going to be able to do intuition like Dirac and Feynman did, they look at the equations and know their implications without going through the derivation. That’s because they’re skilled at it. Okay. So de Regt makes a big point that in order to be able to do that intuitive work with this level of coarse-graining, you have to become skilled and practiced at it. And so what Mark was saying in a sense is that he’s become skilled at thinking with trajectories, just like people can look at Feynman diagrams. Okay. Um, and so that, so that is, but that is understanding it’s very important just to get your first part of your question. For me, understanding is having, um, intelligible theories to build explanatory models, to map onto real phenomena. So let me give you an example.  </p>



<p>John    01:23:49    When any of you ever talk about the stretch reflex and somebody asks you what’s the reflex, you will think of the muscle spindle, the 1a afferent, the synapse with the motor neuron back onto the muscle. Okay. And then you’ll think of an inhibitory interneuron going to the antagonist. Okay. So reflex, when you think about it, intuitively has neural objects in it. You’re thinking about axons and neurons. Okay. Now, when you get to what Mark Churchland is doing in motor cortex, if Sherrington had lived, he could have seen what happens when you take that approach. He would have realized that people aren’t worrying about how neuron a connects to neuron B connects to neuron C connects to neuron D in all that detail in motor cortex, it’s abstracted up to coarse-grained measures. They are neurally derived, but now they’re trajectories in state space. So yeah, I’m working, I’m working on this paper. I think I told you, it’s almost done called the two views of the cognitive brain with a fantastic neuroscientist and philosopher postdoc called David Barack, where we’re talking about this. And it’s possible to have a functional explanation of high-level behaviors, where you can have these psychological terms and add to them a neural piece, just like the axons in the stretch reflex. But the neural piece is a trajectory in a state space. It’s a dynamical object.  </p>



<p>David    01:25:19    It’s quite interesting, John, there’s a, I’ll give you another example of this. And again, in another domain, in 2018, a mathematician called Peter Scholze won the Fields Medal for creating something called perfectoid spaces, which have to do with Galois representations and so on. And one of the things that he pointed out is that when he is trying to solve a problem, the first thing that he really has to do is come up with almost a linguistic definition of the mathematical problem. If he doesn’t have that, he can’t do the math. Sure. Okay. Now he’s not, I mean, this is a Fields Medalist, an extraordinary mathematician, right. And he’s saying that I need to move between levels. Exactly what John just described. It’s not that he can, he doesn’t operate at the machine code of mathematics. Right. He moves up and down. And I think one of the really interesting questions I would have you and John and so forth is could we come to an understanding of how many levels we should have for a given observable and what would be the theory that would tell us how to optimally allocate our attention across these different levels?  </p>



<p>David    01:26:22    I mean, that’s something I think, which would be a very much a complexity problem. It feels very meta, but I imagine the nervous system has  </p>



<p>John    01:26:29    Yes, exactly. And I just, I exactly the nervous system has to do it. And it would be against all the other things that I’ve learned sort of through the Santa Fe Institute and David and other complexity scientists, which is, it would seem very odd to me is as behavior gets more complex and the neuronal aggregates in cortex get larger, that the explanatory object is going to remain a Sherringtonian-style description of a circuit. In other words, it’s the idea that your level of explanation will never change no matter how more complex your behavior becomes, what a bizarre thing that would be. Right. So in other words, one thing I can be sure of is I don’t think that circuits in insects and in the spinal cord are the machine code, and the idea that once you get into cognition and cortex, that you’re going to be able to revert to that level of description flies in the face of what other hierarchical systems and complex phenomenon do it is that you have to come up with new objects that are more abstract and maybe, and it’s actually, you know, another point you’re talking about Olaf Sporns and Dani Bassett versus what Mark Churchland does. I find trajectories and state spaces.  </p>



<p>John    01:27:42    Give me a feeling of understanding that connectivity metrics utterly fail to that’s right. They just go do it for me. And it’s very interesting. And, you know, I have huge respect for Danny. She’s super bright, but she’s when she’s written most recently about the kind of understanding and hypotheses you can test with connectivity, they themselves are couched in connectivity language, is this area autonomous or very connected to this area? It’s just connectivity language again. Right. Whereas I find dynamical systems and trajectories seem to be something that adds to the psychological terms.  </p>



<p>David    01:28:25    It’s interesting, Paul, this gets to your opening remark about that sort of, um, plurality of frameworks that we need to understand something complex. Right. And I think, uh, obviously, maybe again, in my position, I I’m very open-minded. I feel as if they’re all great, right. As all, as they all, illuminate a phenomenon. And I think the problem is always the reduction to one, this belief that there’s only one best way of doing things. And I, and I think John’s right. I think for many people, dynamical systems work, for others, you know, these combinatorial algebraic structures work and, and we as brains by virtue of our histories presumably work differently. And I think if anything, complexity is a kind of liberalism, right? It says let’s allow for the possibility of a multiplicity of approaches and not assume one is real.  </p>



<p>John    01:29:19    And I wouldn’t be that, you know, it’s interesting. I don’t know, Paul have, you’ve read the new history of neuroscience. Matthew Cobb’s book the idea of the brain.  </p>



<p>Paul    01:29:27    No, but isn’t it just a list of metaphors? I I’ve not read it.  </p>



<p>John    01:29:32    Yeah, but it’s actually, well, no, I think actually as a scaffold for thinking, and it’s very good. I love the hit the history part and the early present. Um, I think once it gets into current neuroscience and prediction of the future, it gets more impoverished. But I don’t know whether that’s Matthew cob or whether the field itself is sort of asked them. Um, but it is a good book. I really do recommend it. It’s got lots of delicious, rich stuff in it, and he’s done a good job. It’s not easy to synthesize all that material, but I tell you, what’s fascinating about it is that he has a section at the end of the book where he talks about the future. And it’s very interesting that he begins by talking about emergence, but then drops it like a bad smell, right? It’s like, well, he, I think he said something like emergence is that on the satisfactory explanation before we get to the real explanation, right?  </p>



<p>John    01:30:32    And then he moves on to where he feels like the real progress we made is let’s get back down to the circuits and the neurons themselves, let’s study cognition in a fly where we have the sort of Sherringtonian connectivity map. And then we’ll do some sort of extrapolation to cognition in humans. In other words, you see this tension in the field between not really wanting to talk about coarse-graining and psychological terms and derived measures and saying, surely we can avoid that awful fate for our field, by going into a fly or a worm where we can have the same level of connectivity, detail, and intuition as we did for the stretch reflex. But now we can apply that understanding to something that we call cognition and then somehow extrapolate from that higher up the neuraxis. In other words, you see that there’s this tension that just won’t go away.  </p>

</div></div>


<p></p>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/077-Krakauers.mp3" length="89642011"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[
















David, John, and I discuss the role of complexity science in the study of intelligence. In this first part, we talk about complexity itself, its role in neuroscience, emergence and levels of explanation, understanding, epistemology and ontology, and really quite a bit more.



Notes:



David’s page at the Santa Fe Institute.John’s BLAM lab website.Follow SFI on twitter: @sfiscience.BLAM on Twitter: @blamlab Related Krakauer stuff:At the limits of thought. An Aeon article by DavidComplex Time: Cognitive Regime Shift II – When/Why/How the Brain Breaks. A video conversation with both John and David.Complexity Podcast.Books mentioned:Worlds Hidden in Plain Sight: The Evolving Idea of Complexity at the Santa Fe Institute, ed. David Krakauer.Understanding Scientific Understanding by Henk de Regt.The Idea of the Brain by Matthew Cobb.New Dark Age: Technology and the End of the Future by James Bridle.The River of Consciousness by Oliver Sacks.







]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-krakauer-1-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:33:04</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 076 Olaf Sporns: Network Neuroscience]]>
                </title>
                <pubDate>Sat, 04 Jul 2020 08:14:29 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-076-olaf-sporns-network-neuroscience</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-076-olaf-sporns-network-neuroscience</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/07/art-sporns-01.jpg" alt="" class="wp-image-1064" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/olaf.jpeg" alt="" class="wp-image-1066" width="200" height="200" /></div>



<p>Olaf and I discuss the explosion of network neuroscience, which uses network science tools to map the structure (connectome) and activity of the brain at various spatial and temporal scales. We talk about the possibility of bridging physical and functional connectivity via communication dynamics, and about the relation between network science and artificial neural networks and plenty more.</p>



<p>Notes:</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0262528983/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262528983&amp;linkId=4ad16239c176d4296d2f5dd665dcf3d9" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/07/NotB.jpg" alt="" class="wp-image-1065" /></a></div>



<ul><li><a href="https://cortex.sitehost.iu.edu/">Computational Cognitive Neuroscience Laboratory</a>.</li><li>Twitter: <a href="https://twitter.com/spornslab">@spornslab</a></li><li>His excellent book: <a href="https://www.amazon.com/gp/product/0262528983/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262528983&amp;linkId=4ad16239c176d4296d2f5dd665dcf3d9" target="_blank" rel="noreferrer noopener">Networks of the Brain</a>.</li><li>Related papers:<ul><li><a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5485642/">Network Neuroscience</a>.</li><li><a href="https://www.neuroscience.cam.ac.uk/Uploads/nrn%20bullmore%20sporns%202012.pdf">The economy of brain network organization.</a></li><li><a href="https://www.nature.com/articles/nrn.2017.149">Communication dynamics in complex brain networks.</a></li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Olaf and I discuss the explosion of network neuroscience, which uses network science tools to map the structure (connectome) and activity of the brain at various spatial and temporal scales. We talk about the possibility of bridging physical and functional connectivity via communication dynamics, and about the relation between network science and artificial neural networks and plenty more.



Notes:







Computational Cognitive Neuroscience Laboratory.Twitter: @spornslabHis excellent book: Networks of the Brain.Related papers:Network Neuroscience.The economy of brain network organization.Communication dynamics in complex brain networks.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 076 Olaf Sporns: Network Neuroscience]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/07/art-sporns-01.jpg" alt="" class="wp-image-1064" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/07/olaf.jpeg" alt="" class="wp-image-1066" width="200" height="200" /></div>



<p>Olaf and I discuss the explosion of network neuroscience, which uses network science tools to map the structure (connectome) and activity of the brain at various spatial and temporal scales. We talk about the possibility of bridging physical and functional connectivity via communication dynamics, and about the relation between network science and artificial neural networks and plenty more.</p>



<p>Notes:</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0262528983/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262528983&amp;linkId=4ad16239c176d4296d2f5dd665dcf3d9" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/07/NotB.jpg" alt="" class="wp-image-1065" /></a></div>



<ul><li><a href="https://cortex.sitehost.iu.edu/">Computational Cognitive Neuroscience Laboratory</a>.</li><li>Twitter: <a href="https://twitter.com/spornslab">@spornslab</a></li><li>His excellent book: <a href="https://www.amazon.com/gp/product/0262528983/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262528983&amp;linkId=4ad16239c176d4296d2f5dd665dcf3d9" target="_blank" rel="noreferrer noopener">Networks of the Brain</a>.</li><li>Related papers:<ul><li><a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5485642/">Network Neuroscience</a>.</li><li><a href="https://www.neuroscience.cam.ac.uk/Uploads/nrn%20bullmore%20sporns%202012.pdf">The economy of brain network organization.</a></li><li><a href="https://www.nature.com/articles/nrn.2017.149">Communication dynamics in complex brain networks.</a></li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/076-Olaf-Sporns.mp3" length="102010013"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Olaf and I discuss the explosion of network neuroscience, which uses network science tools to map the structure (connectome) and activity of the brain at various spatial and temporal scales. We talk about the possibility of bridging physical and functional connectivity via communication dynamics, and about the relation between network science and artificial neural networks and plenty more.



Notes:







Computational Cognitive Neuroscience Laboratory.Twitter: @spornslabHis excellent book: Networks of the Brain.Related papers:Network Neuroscience.The economy of brain network organization.Communication dynamics in complex brain networks.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-sporns-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:45:57</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 075 Jim DiCarlo: Reverse Engineering Vision]]>
                </title>
                <pubDate>Wed, 24 Jun 2020 10:29:51 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-075-jim-dicarlo-reverse-engineering-vision</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-075-jim-dicarlo-reverse-engineering-vision</link>
                                <description>
                                            <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/06/art-dicarlo-01-1.jpg" alt="" class="wp-image-1062" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img width="220" height="147" src="https://braininspired.co/wp-content/uploads/2020/06/dicarlo_1b.jpg" alt="" class="wp-image-1060" /></div>



<p>Jim and I discuss his reverse engineering approach to visual intelligence, using deep models optimized to perform object recognition tasks. We talk about the history of his work developing models to match the neural activity in the ventral visual stream, how deep learning connects with those models, and some of his recent work: adding recurrence to the models to account for more difficult object recognition, using unsupervised learning to account for plasticity in the visual stream, and controlling neural activity  by creating specific images for subjects to view.</p>



<p>Notes:</p>



<ul><li><a href="http://dicarlolab.mit.edu/">The DiCarlo Lab at MIT</a>.</li><li>Related papers:<ul><li><a href="https://mcgovern.mit.edu/wp-content/uploads/2019/01/7255.full_.pdf">Large-Scale, High-Resolution Comparison of the Core Visual Object Recognition Behavior of Humans, Monkeys, and State-of-the-Art Deep Artificial Neural Networks</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.05.10.086959v1">Fast recurrent processing via ventral prefrontal cortex is needed by the primate ventral stream for robust core visual object recognition</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.01.13.900837v1">Unsupervised changes in core object recognition behavioral performance are accurately predicted by unsupervised neural plasticity in inferior temporal cortex</a>.</li><li><a href="http://science.sciencemag.org/cgi/rapidpdf/364/6439/eaav9436?ijkey=iBRdlniG7iYuA&amp;keytype=ref&amp;siteid=sci" target="_blank" rel="noreferrer noopener">Neural population control via deep image synthesis.</a></li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Jim and I discuss his reverse engineering approach to visual intelligence, using deep models optimized to perform object recognition tasks. We talk about the history of his work developing models to match the neural activity in the ventral visual stream, how deep learning connects with those models, and some of his recent work: adding recurrence to the models to account for more difficult object recognition, using unsupervised learning to account for plasticity in the visual stream, and controlling neural activity  by creating specific images for subjects to view.



Notes:



The DiCarlo Lab at MIT.Related papers:Large-Scale, High-Resolution Comparison of the Core Visual Object Recognition Behavior of Humans, Monkeys, and State-of-the-Art Deep Artificial Neural Networks.Fast recurrent processing via ventral prefrontal cortex is needed by the primate ventral stream for robust core visual object recognition.Unsupervised changes in core object recognition behavioral performance are accurately predicted by unsupervised neural plasticity in inferior temporal cortex.Neural population control via deep image synthesis.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 075 Jim DiCarlo: Reverse Engineering Vision]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img width="800" height="418" src="https://braininspired.co/wp-content/uploads/2020/06/art-dicarlo-01-1.jpg" alt="" class="wp-image-1062" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img width="434" height="102" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img width="220" height="147" src="https://braininspired.co/wp-content/uploads/2020/06/dicarlo_1b.jpg" alt="" class="wp-image-1060" /></div>



<p>Jim and I discuss his reverse engineering approach to visual intelligence, using deep models optimized to perform object recognition tasks. We talk about the history of his work developing models to match the neural activity in the ventral visual stream, how deep learning connects with those models, and some of his recent work: adding recurrence to the models to account for more difficult object recognition, using unsupervised learning to account for plasticity in the visual stream, and controlling neural activity by creating specific images for subjects to view.</p>



<p>Notes:</p>



<ul><li><a href="http://dicarlolab.mit.edu/">The DiCarlo Lab at MIT</a>.</li><li>Related papers:<ul><li><a href="https://mcgovern.mit.edu/wp-content/uploads/2019/01/7255.full_.pdf">Large-Scale, High-Resolution Comparison of the Core Visual Object Recognition Behavior of Humans, Monkeys, and State-of-the-Art Deep Artificial Neural Networks</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.05.10.086959v1">Fast recurrent processing via ventral prefrontal cortex is needed by the primate ventral stream for robust core visual object recognition</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.01.13.900837v1">Unsupervised changes in core object recognition behavioral performance are accurately predicted by unsupervised neural plasticity in inferior temporal cortex</a>.</li><li><a href="http://science.sciencemag.org/cgi/rapidpdf/364/6439/eaav9436?ijkey=iBRdlniG7iYuA&amp;keytype=ref&amp;siteid=sci" target="_blank" rel="noreferrer noopener">Neural population control via deep image synthesis.</a></li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/075-Jim-DiCarlo-public.mp3" length="73308380"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Jim and I discuss his reverse engineering approach to visual intelligence, using deep models optimized to perform object recognition tasks. We talk about the history of his work developing models to match the neural activity in the ventral visual stream, how deep learning connects with those models, and some of his recent work: adding recurrence to the models to account for more difficult object recognition, using unsupervised learning to account for plasticity in the visual stream, and controlling neural activity by creating specific images for subjects to view.



Notes:



The DiCarlo Lab at MIT.Related papers:Large-Scale, High-Resolution Comparison of the Core Visual Object Recognition Behavior of Humans, Monkeys, and State-of-the-Art Deep Artificial Neural Networks.Fast recurrent processing via ventral prefrontal cortex is needed by the primate ventral stream for robust core visual object recognition.Unsupervised changes in core object recognition behavioral performance are accurately predicted by unsupervised neural plasticity in inferior temporal cortex.Neural population control via deep image synthesis.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-dicarlo-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:16:03</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 074 Ginger Campbell: Are You Sure?]]>
                </title>
                <pubDate>Tue, 16 Jun 2020 06:28:29 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-074-ginger-campbell-are-you-sure</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-074-ginger-campbell-are-you-sure</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/06/art-campbell-01.jpg" alt="" class="wp-image-1056" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/06/download.jpeg" alt="" class="wp-image-1057" width="237" height="296" /></div>



<p>Ginger and I discuss her book <a href="https://www.amazon.com/gp/product/1951591259/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1951591259&amp;linkId=514d6ea64c465eb734739f2d795172d8" target="_blank" rel="noreferrer noopener">Are You Sure? The Unconscious Origins of Certainty</a>, which summarizes Robert Burton's work exploring the experience and phenomenal origin of feeling confident, and how the vast majority of our brain processing occurs outside our conscious awareness.</p>



<ul><li><a href="https://www.amazon.com/gp/product/1951591259/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1951591259&amp;linkId=514d6ea64c465eb734739f2d795172d8" target="_blank" rel="noreferrer noopener">Are You Sure? The Unconscious Origins of Certainty.</a></li><li><a href="https://brainsciencepodcast.com/" target="_blank" rel="noreferrer noopener">Brain Science Podcast</a>.</li></ul>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/06/51FEFVFDXNL._SX331_BO1204203200_.jpg" alt="" class="wp-image-1058" width="167" height="250" /></div>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[








Ginger and I discuss her book Are You Sure? The Unconscious Origins of Certainty, which summarizes Robert Burton's work exploring the experience and phenomenal origin of feeling confident, and how the vast majority of our brain processing occurs outside our conscious awareness.



Are You Sure? The Unconscious Origins of Certainty.Brain Science Podcast.




]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 074 Ginger Campbell: Are You Sure?]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/06/art-campbell-01.jpg" alt="" class="wp-image-1056" />



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/06/download.jpeg" alt="" class="wp-image-1057" width="237" height="296" /></div>



<p>Ginger and I discuss her book <a href="https://www.amazon.com/gp/product/1951591259/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1951591259&amp;linkId=514d6ea64c465eb734739f2d795172d8" target="_blank" rel="noreferrer noopener">Are You Sure? The Unconscious Origins of Certainty</a>, which summarizes Robert Burton's work exploring the experience and phenomenal origin of feeling confident, and how the vast majority of our brain processing occurs outside our conscious awareness.</p>



<ul><li><a href="https://www.amazon.com/gp/product/1951591259/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1951591259&amp;linkId=514d6ea64c465eb734739f2d795172d8" target="_blank" rel="noreferrer noopener">Are You Sure? The Unconscious Origins of Certainty.</a></li><li><a href="https://brainsciencepodcast.com/" target="_blank" rel="noreferrer noopener">Brain Science Podcast</a>.</li></ul>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/06/51FEFVFDXNL._SX331_BO1204203200_.jpg" alt="" class="wp-image-1058" width="167" height="250" /></div>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/074-Ginger-Campbell.mp3" length="79349352"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[








Ginger and I discuss her book Are You Sure? The Unconscious Origins of Certainty, which summarizes Robert Burton's work exploring the experience and phenomenal origin of feeling confident, and how the vast majority of our brain processing occurs outside our conscious awareness.



Are You Sure? The Unconscious Origins of Certainty.Brain Science Podcast.




]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-campbell-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:22:35</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 073 Megan Peters: Consciousness and Metacognition]]>
                </title>
                <pubDate>Wed, 10 Jun 2020 16:47:00 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-073-megan-peters-consciousness-and-metacognition</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-073-megan-peters-consciousness-and-metacognition</link>
                                <description>
                                            <![CDATA[
<img class="wp-image-1038" src="https://braininspired.co/wp-content/uploads/2020/06/art-peters-01.jpg" alt="" />

<p> </p>

<div class="wp-block-image">
<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img class="wp-image-585" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" /></a>
</div>

<p> </p>

<div class="wp-block-image">
<img class="wp-image-1037" src="https://braininspired.co/wp-content/uploads/2020/06/Peters_headshot.jpg" alt="" />
</div>

<p> </p>

<p>Megan and I discuss her work using metacognition as a way to study subjective awareness, or confidence. We talk about using computational and neural network models to probe how decisions are related to our confidence, the current state of the science of consciousness, and her newest project using fMRI decoded neurofeedback to induce particular brain states in subjects so we can learn about conscious and unconscious brain processing.</p>

<p> </p>

<p>Notes:</p>

<p> </p>

<ul>
<li>Visit Megan's <a href="https://faculty.sites.uci.edu/cnclab/">cognitive &amp; neural computation lab</a>.</li>
<li>Twitter: <a href="https://twitter.com/meganakpeters">@meganakpeters</a></li>
<li>The papers we discuss or mention:
<ul>
<li><a href="https://www.nature.com/articles/s41562-017-0139.epdf?author_access_token=PIivmQNeHj1d2HKkq4W3otRgN0jAjWel9jnR3ZoTv0PC8is0OZnuZsuv_4TNT6KUuIKXCG2U78BS-vo5Fm2rx51ks8BpVKmjLS9KbwmWyoPghOKwHdB7o5mhW7Y8fw3HAkrMPaZRfgcKE1sUczW-zA%3D%3D">Human intracranial electrophysiology suggests suboptimal calculations underlie perceptual confidence</a></li>
<li><a href="https://www.biorxiv.org/content/10.1101/558858v2">Tuned normalization in perceptual decision-making circuits can explain seemingly suboptimal confidence behavior.</a></li>
</ul>
</li>
</ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[


 





 





 

Megan and I discuss her work using metacognition as a way to study subjective awareness, or confidence. We talk about using computational and neural network models to probe how decisions are related to our confidence, the current state of the science of consciousness, and her newest project using fMRI decoded neurofeedback to induce particular brain states in subjects so we can learn about conscious and unconscious brain processing.

 

Notes:

 


Visit Megan's cognitive & neural computation lab.
Twitter: @meganakpeters
The papers we discuss or mention:

Human intracranial electrophysiology suggests suboptimal calculations underlie perceptual confidence
Tuned normalization in perceptual decision-making circuits can explain seemingly suboptimal confidence behavior.



]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 073 Megan Peters: Consciousness and Metacognition]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img class="wp-image-1038" src="https://braininspired.co/wp-content/uploads/2020/06/art-peters-01.jpg" alt="" />

<p> </p>

<div class="wp-block-image">
<a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img class="wp-image-585" src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" /></a>
</div>

<p> </p>

<div class="wp-block-image">
<img class="wp-image-1037" src="https://braininspired.co/wp-content/uploads/2020/06/Peters_headshot.jpg" alt="" />
</div>

<p> </p>

<p>Megan and I discuss her work using metacognition as a way to study subjective awareness, or confidence. We talk about using computational and neural network models to probe how decisions are related to our confidence, the current state of the science of consciousness, and her newest project using fMRI decoded neurofeedback to induce particular brain states in subjects so we can learn about conscious and unconscious brain processing.</p>

<p> </p>

<p>Notes:</p>

<p> </p>

<ul>
<li>Visit Megan's <a href="https://faculty.sites.uci.edu/cnclab/">cognitive &amp; neural computation lab</a>.</li>
<li>Twitter: <a href="https://twitter.com/meganakpeters">@meganakpeters</a></li>
<li>The papers we discuss or mention:
<ul>
<li><a href="https://www.nature.com/articles/s41562-017-0139.epdf?author_access_token=PIivmQNeHj1d2HKkq4W3otRgN0jAjWel9jnR3ZoTv0PC8is0OZnuZsuv_4TNT6KUuIKXCG2U78BS-vo5Fm2rx51ks8BpVKmjLS9KbwmWyoPghOKwHdB7o5mhW7Y8fw3HAkrMPaZRfgcKE1sUczW-zA%3D%3D">Human intracranial electrophysiology suggests suboptimal calculations underlie perceptual confidence</a></li>
<li><a href="https://www.biorxiv.org/content/10.1101/558858v2">Tuned normalization in perceptual decision-making circuits can explain seemingly suboptimal confidence behavior.</a></li>
</ul>
</li>
</ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/073-Megan-Peters-Public.mp3" length="81832939"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[


 





 





 

Megan and I discuss her work using metacognition as a way to study subjective awareness, or confidence. We talk about using computational and neural network models to probe how decisions are related to our confidence, the current state of the science of consciousness, and her newest project using fMRI decoded neurofeedback to induce particular brain states in subjects so we can learn about conscious and unconscious brain processing.

 

Notes:

 


Visit Megan's cognitive & neural computation lab.
Twitter: @meganakpeters
The papers we discuss or mention:

Human intracranial electrophysiology suggests suboptimal calculations underlie perceptual confidence
Tuned normalization in perceptual decision-making circuits can explain seemingly suboptimal confidence behavior.



]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-peters-02.jpg"></itunes:image>
                                                                            <itunes:duration>01:25:10</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality]]>
                </title>
                <pubDate>Mon, 01 Jun 2020 13:24:52 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-072-mazviita-chirimuuta-understanding-prediction-and-reality</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-072-mazviita-chirimuuta-understanding-prediction-and-reality</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/06/art-chirimuuta-med-01.jpg" alt="" class="wp-image-1035" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/06/MC-pic.jpg" alt="" class="wp-image-1034" width="340" height="300" /></div>



<p>Mazviita and I discuss the growing divide between prediction and understanding as neuroscience models and deep learning networks become bigger and more complex. She describes her non-factive account of understanding, which among other things suggests that the best predictive models may deliver less understanding. We also discuss the brain as a computer metaphor, and whether it's really possible to ignore all the traditionally "non-computational" parts of the brain like metabolism and other life processes.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0262534576/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262534576&amp;linkId=54ffad9583f44eccb801b830f628ba91" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/06/outside-color.jpg" alt="" class="wp-image-1033" /></a></div>



<p>Show notes:</p>



<ul><li>Her <a href="https://www.hps.pitt.edu/people/mazviita-chirimuuta" target="_blank" rel="noreferrer noopener">website</a>.</li><li><a href="https://outsidecolour.net/" target="_blank" rel="noreferrer noopener">Outside color website</a> (with links to more of her publications)</li><li>Her book <a href="https://www.amazon.com/gp/product/0262534576/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262534576&amp;linkId=54ffad9583f44eccb801b830f628ba91" target="_blank" rel="noreferrer noopener">Outside Color: Perceptual Science and the Puzzle of Color in Philosophy</a>.</li><li>Papers we discuss or mention:<ul><li><a href="https://outsidecolour.files.wordpress.com/2020/05/chirimuuta-penultimate-prediction-vs.-understanding.pdf">Prediction Versus Understanding in Computationally Enhanced Neuroscience</a>.</li><li><a href="https://outsidecolour.files.wordpress.com/2019/10/chirimuuta-brain-computer-analogy-v2.docx">Your brain is like a computer: function, analogy, simplification.</a></li><li><a href="https://outsidecolour.files.wordpress.com/2019/09/chirimuuta-forthcoming-heraclitean-brain.pdf">Charting the Heraclitean Brain: Perspectivism and Simplification in Models of the Motor Cortex</a>.</li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Mazviita and I discuss the growing divide between prediction and understanding as neuroscience models and deep learning networks become bigger and more complex. She describes her non-factive account of understanding, which among other things suggests that the best predictive models may deliver less understanding. We also discuss the brain as a computer metaphor, and whether it's really possible to ignore all the traditionally "non-computational" parts of the brain like metabolism and other life processes.







Show notes:



Her website.Outside color website (with links to more of her publications)Her book Outside Color: Perceptual Science and the Puzzle of Color in Philosophy.Papers we discuss or mention:Prediction Versus Understanding in Computationally Enhanced Neuroscience.Your brain is like a computer: function, analogy, simplification.Charting the Heraclitean Brain: Perspectivism and Simplification in Models of the Motor Cortex.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 072 Mazviita Chirimuuta: Understanding, Prediction, and Reality]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/06/art-chirimuuta-med-01.jpg" alt="" class="wp-image-1035" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/06/MC-pic.jpg" alt="" class="wp-image-1034" width="340" height="300" /></div>



<p>Mazviita and I discuss the growing divide between prediction and understanding as neuroscience models and deep learning networks become bigger and more complex. She describes her non-factive account of understanding, which among other things suggests that the best predictive models may deliver less understanding. We also discuss the brain as a computer metaphor, and whether it's really possible to ignore all the traditionally "non-computational" parts of the brain like metabolism and other life processes.</p>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/0262534576/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262534576&amp;linkId=54ffad9583f44eccb801b830f628ba91" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/06/outside-color.jpg" alt="" class="wp-image-1033" /></a></div>



<p>Show notes:</p>



<ul><li>Her <a href="https://www.hps.pitt.edu/people/mazviita-chirimuuta" target="_blank" rel="noreferrer noopener">website</a>.</li><li><a href="https://outsidecolour.net/" target="_blank" rel="noreferrer noopener">Outside color website</a> (with links to more of her publications)</li><li>Her book <a href="https://www.amazon.com/gp/product/0262534576/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=0262534576&amp;linkId=54ffad9583f44eccb801b830f628ba91" target="_blank" rel="noreferrer noopener">Outside Color: Perceptual Science and the Puzzle of Color in Philosophy</a>.</li><li>Papers we discuss or mention:<ul><li><a href="https://outsidecolour.files.wordpress.com/2020/05/chirimuuta-penultimate-prediction-vs.-understanding.pdf">Prediction Versus Understanding in Computationally Enhanced Neuroscience</a>.</li><li><a href="https://outsidecolour.files.wordpress.com/2019/10/chirimuuta-brain-computer-analogy-v2.docx">Your brain is like a computer: function, analogy, simplification.</a></li><li><a href="https://outsidecolour.files.wordpress.com/2019/09/chirimuuta-forthcoming-heraclitean-brain.pdf">Charting the Heraclitean Brain: Perspectivism and Simplification in Models of the Motor Cortex</a>.</li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/072-Mazviita-Chirimuuta-public.mp3" length="75872555"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Mazviita and I discuss the growing divide between prediction and understanding as neuroscience models and deep learning networks become bigger and more complex. She describes her non-factive account of understanding, which among other things suggests that the best predictive models may deliver less understanding. We also discuss the brain as a computer metaphor, and whether it's really possible to ignore all the traditionally "non-computational" parts of the brain like metabolism and other life processes.







Show notes:



Her website.Outside color website (with links to more of her publications)Her book Outside Color: Perceptual Science and the Puzzle of Color in Philosophy.Papers we discuss or mention:Prediction Versus Understanding in Computationally Enhanced Neuroscience.Your brain is like a computer: function, analogy, simplification.Charting the Heraclitean Brain: Perspectivism and Simplification in Models of the Motor Cortex.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-chirimuuta-med-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:18:53</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 071 J. Patrick Mayo: The Path To Faculty]]>
                </title>
                <pubDate>Mon, 25 May 2020 06:01:36 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">
                    https://brain-inspired.castos.com/podcasts/330/episodes/bi-071-j-patrick-mayo-the-path-to-faculty</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-071-j-patrick-mayo-the-path-to-faculty</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/05/art-mayo-01.jpg" alt="" class="wp-image-1030" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/05/Mayo_J_Patrick-headshot.jpg" alt="" class="wp-image-1029" /></div>



<p>Patrick and I mostly discuss his path from a technician in the then nascent Jim DiCarlo lab, through his graduate school and two postdoc experiences, and finally landing a faculty position, plus the culture and issues in academia in general. We also cover plenty of science, like the role of eye movements in the study of vision, the neuroscience (and concept) of attention, what Patrick thinks of the deep learning hype, and more. </p>



<p>But, this is a special episode, less about the science and more about the experience of an academic neuroscience trajectory/life. Episodes like this will appear in Patreon supporters' private feeds from now on.</p>



<p>Show notes:</p>



<ul><li>His pre-lab website <a href="https://ophthalmology.pitt.edu/people/j-patrick-mayo-phd">university page</a>.</li><li>Twitter: <a href="https://twitter.com/mayo_lab">@mayo_lab</a>.</li><li>Here’s the paper he recommends to understand attention:<ul><li><a href="https://www.pnas.org/content/pnas/116/52/26187.full.pdf">Attention can be subdivided into neurobiological components corresponding to distinct behavioral effects.</a></li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Patrick and I mostly discuss his path from a technician in the then nascent Jim DiCarlo lab, through his graduate school and two postdoc experiences, and finally landing a faculty position, plus the culture and issues in academia in general. We also cover plenty of science, like the role of eye movements in the study of vision, the neuroscience (and concept) of attention, what Patrick thinks of the deep learning hype, and more. 



But, this is a special episode, less about the science and more about the experience of an academic neuroscience trajectory/life. Episodes like this will appear in Patreon supporters' private feeds from now on.



Show notes:



His pre-lab website university page.Twitter: @mayo_lab.Here’s the paper he recommends to understand attention:Attention can be subdivided into neurobiological components corresponding to distinct behavioral effects.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 071 J. Patrick Mayo: The Path To Faculty]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/05/art-mayo-01.jpg" alt="" class="wp-image-1030" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/05/Mayo_J_Patrick-headshot.jpg" alt="" class="wp-image-1029" /></div>



<p>Patrick and I mostly discuss his path from a technician in the then nascent Jim DiCarlo lab, through his graduate school and two postdoc experiences, and finally landing a faculty position, plus the culture and issues in academia in general. We also cover plenty of science, like the role of eye movements in the study of vision, the neuroscience (and concept) of attention, what Patrick thinks of the deep learning hype, and more. </p>



<p>But, this is a special episode, less about the science and more about the experience of an academic neuroscience trajectory/life. Episodes like this will appear in Patreon supporters' private feeds from now on.</p>



<p>Show notes:</p>



<ul><li>His pre-lab website <a href="https://ophthalmology.pitt.edu/people/j-patrick-mayo-phd">university page</a>.</li><li>Twitter: <a href="https://twitter.com/mayo_lab">@mayo_lab</a>.</li><li>Here’s the paper he recommends to understand attention:<ul><li><a href="https://www.pnas.org/content/pnas/116/52/26187.full.pdf">Attention can be subdivided into neurobiological components corresponding to distinct behavioral effects.</a></li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/071-Patrick-Mayo.mp3" length="68256539"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Patrick and I mostly discuss his path from a technician in the then nascent Jim DiCarlo lab, through his graduate school and two postdoc experiences, and finally landing a faculty position, plus the culture and issues in academia in general. We also cover plenty of science, like the role of eye movements in the study of vision, the neuroscience (and concept) of attention, what Patrick thinks of the deep learning hype, and more. 



But, this is a special episode, less about the science and more about the experience of an academic neuroscience trajectory/life. Episodes like this will appear in Patreon supporters' private feeds from now on.



Show notes:



His pre-lab website university page.Twitter: @mayo_lab.Here’s the paper he recommends to understand attention:Attention can be subdivided into neurobiological components corresponding to distinct behavioral effects.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-mayo-square-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:10:57</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 070 Bradley Love: How We Learn Concepts]]>
                </title>
                <pubDate>Fri, 15 May 2020 06:18:33 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-070-bradley-love-how-we-learn-concepts</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-070-bradley-love-how-we-learn-concepts</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/05/art-love-01.jpg" alt="" class="wp-image-1020" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/05/brad.png" alt="" class="wp-image-1021" width="190" height="263" /></div>



<p>Brad and I discuss his battle-tested, age-defying cognitive model for how we learn and store concepts by forming and rearranging clusters, how the model maps onto brain areas, and how he's using deep learning models to explore how attention and sensory information interact with concept formation. We also discuss the cognitive modeling approach, Marr's levels of analysis, the term "biological plausibility", emergence and reduction, and plenty more.</p>



<p>Notes:</p>



<ul><li>Visit Brad’s <a href="https://bradlove.org/" target="_blank" rel="noreferrer noopener">website</a>.</li><li>Follow Brad on twitter: <a href="https://twitter.com/ProfData" target="_blank" rel="noreferrer noopener">@ProfData</a>.</li><li>Related papers:<ul><li><a href="https://psyarxiv.com/49vea/download" target="_blank" rel="noreferrer noopener">Levels of Biological Plausibility</a>.</li><li><a href="http://bradlove.org/papers/LoveGureckis_2007.pdf">Models in search of a brain.</a></li><li><a href="https://www.nature.com/articles/s41467-019-13760-8.pdf">A non-spatial account of place and grid cells based on clustering models of concept learning</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.02.13.947341v1.full.pdf">Abstract neural representations of category membership beyond information coding stimulus or response.</a></li><li><a href="https://www.nature.com/articles/s41467-019-13930-8.pdf">Ventromedial prefrontal cortex compression during concept learning</a>.</li><li><a href="https://arxiv.org/abs/2002.02342">The Costs and Benefits of Goal-Directed Attention in Deep Convolutional Neural Networks</a></li><li><a href="https://arxiv.org/pdf/1906.09012.pdf">Learning as the unsupervised alignment of conceptual systems.</a></li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Brad and I discuss his battle-tested, age-defying cognitive model for how we learn and store concepts by forming and rearranging clusters, how the model maps onto brain areas, and how he's using deep learning models to explore how attention and sensory information interact with concept formation. We also discuss the cognitive modeling approach, Marr's levels of analysis, the term "biological plausibility", emergence and reduction, and plenty more.



Notes:



Visit Brad’s website.Follow Brad on twitter: @ProfData.Related papers:Levels of Biological Plausibility.Models in search of a brain.A non-spatial account of place and grid cells based on clustering models of concept learning.Abstract neural representations of category membership beyond information coding stimulus or response.Ventromedial prefrontal cortex compression during concept learning.The Costs and Benefits of Goal-Directed Attention in Deep Convolutional Neural NetworksLearning as the unsupervised alignment of conceptual systems.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 070 Bradley Love: How We Learn Concepts]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/05/art-love-01.jpg" alt="" class="wp-image-1020" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/05/brad.png" alt="" class="wp-image-1021" width="190" height="263" /></div>



<p>Brad and I discuss his battle-tested, age-defying cognitive model for how we learn and store concepts by forming and rearranging clusters, how the model maps onto brain areas, and how he's using deep learning models to explore how attention and sensory information interact with concept formation. We also discuss the cognitive modeling approach, Marr's levels of analysis, the term "biological plausibility", emergence and reduction, and plenty more.</p>



<p>Notes:</p>



<ul><li>Visit Brad’s <a href="https://bradlove.org/" target="_blank" rel="noreferrer noopener">website</a>.</li><li>Follow Brad on twitter: <a href="https://twitter.com/ProfData" target="_blank" rel="noreferrer noopener">@ProfData</a>.</li><li>Related papers:<ul><li><a href="https://psyarxiv.com/49vea/download" target="_blank" rel="noreferrer noopener">Levels of Biological Plausibility</a>.</li><li><a href="http://bradlove.org/papers/LoveGureckis_2007.pdf">Models in search of a brain.</a></li><li><a href="https://www.nature.com/articles/s41467-019-13760-8.pdf">A non-spatial account of place and grid cells based on clustering models of concept learning</a>.</li><li><a href="https://www.biorxiv.org/content/10.1101/2020.02.13.947341v1.full.pdf">Abstract neural representations of category membership beyond information coding stimulus or response.</a></li><li><a href="https://www.nature.com/articles/s41467-019-13930-8.pdf">Ventromedial prefrontal cortex compression during concept learning</a>.</li><li><a href="https://arxiv.org/abs/2002.02342">The Costs and Benefits of Goal-Directed Attention in Deep Convolutional Neural Networks</a></li><li><a href="https://arxiv.org/pdf/1906.09012.pdf">Learning as the unsupervised alignment of conceptual systems.</a></li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/070-Bradley-Love.mp3" length="102975250"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Brad and I discuss his battle-tested, age-defying cognitive model for how we learn and store concepts by forming and rearranging clusters, how the model maps onto brain areas, and how he's using deep learning models to explore how attention and sensory information interact with concept formation. We also discuss the cognitive modeling approach, Marr's levels of analysis, the term "biological plausibility", emergence and reduction, and plenty more.



Notes:



Visit Brad’s website.Follow Brad on twitter: @ProfData.Related papers:Levels of Biological Plausibility.Models in search of a brain.A non-spatial account of place and grid cells based on clustering models of concept learning.Abstract neural representations of category membership beyond information coding stimulus or response.Ventromedial prefrontal cortex compression during concept learning.The Costs and Benefits of Goal-Directed Attention in Deep Convolutional Neural NetworksLearning as the unsupervised alignment of conceptual systems.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/art-love-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:47:07</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 069 David Ferrucci: Machines To Understand Stories]]>
                </title>
                <pubDate>Tue, 05 May 2020 10:14:57 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-069-david-ferrucci-machines-to-understand-stories</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-069-david-ferrucci-machines-to-understand-stories</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/05/twitter-ferrucci-01.jpg" alt="" class="wp-image-992" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/05/download.jpeg" alt="" class="wp-image-993" width="360" height="240" /></div>



<p>David and I discuss the latest efforts he and his Elemental Cognition team have made to create machines that can understand stories the way humans can and do. The long term vision is to create what David calls "thought partners", which are virtual assistants that can learn and synthesize a massive amount of information for us when we need that information for whatever project we're working on. We also discuss the nature of understanding, language, the role of the biological sciences for AI, and more.</p>



<ul><li>Dave’s business <a href="https://www.elementalcognition.com/">Elemental Cognition.</a></li><li>The paper we discuss:<ul><li><a href="https://arxiv.org/abs/2005.01525" target="_blank" rel="noreferrer noopener">To Test Machine Comprehension, Start by Defining Comprehension.</a><br /></li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












David and I discuss the latest efforts he and his Elemental Cognition team have made to create machines that can understand stories the way humans can and do. The long term vision is to create what David calls "thought partners", which are virtual assistants that can learn and synthesize a massive amount of information for us when we need that information for whatever project we're working on. We also discuss the nature of understanding, language, the role of the biological sciences for AI, and more.



Dave’s business Elemental Cognition.The paper we discuss:To Test Machine Comprehension, Start by Defining Comprehension.
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 069 David Ferrucci: Machines To Understand Stories]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/05/twitter-ferrucci-01.jpg" alt="" class="wp-image-992" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/05/download.jpeg" alt="" class="wp-image-993" width="360" height="240" /></div>



<p>David and I discuss the latest efforts he and his Elemental Cognition team have made to create machines that can understand stories the way humans can and do. The long term vision is to create what David calls "thought partners", which are virtual assistants that can learn and synthesize a massive amount of information for us when we need that information for whatever project we're working on. We also discuss the nature of understanding, language, the role of the biological sciences for AI, and more.</p>



<ul><li>Dave’s business <a href="https://www.elementalcognition.com/">Elemental Cognition.</a></li><li>The paper we discuss:<ul><li><a href="https://arxiv.org/abs/2005.01525" target="_blank" rel="noreferrer noopener">To Test Machine Comprehension, Start by Defining Comprehension.</a><br /></li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/069-David-Ferrucci.mp3" length="83254942"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












David and I discuss the latest efforts he and his Elemental Cognition team have made to create machines that can understand stories the way humans can and do. The long term vision is to create what David calls "thought partners", which are virtual assistants that can learn and synthesize a massive amount of information for us when we need that information for whatever project we're working on. We also discuss the nature of understanding, language, the role of the biological sciences for AI, and more.



Dave’s business Elemental Cognition.The paper we discuss:To Test Machine Comprehension, Start by Defining Comprehension.
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/twitter-ferrucci-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:26:35</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 068 Rodrigo Quian Quiroga: NeuroScience Fiction]]>
                </title>
                <pubDate>Fri, 24 Apr 2020 07:45:58 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-068-rodrigo-quian-quiroga-neuroscience-fiction</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-068-rodrigo-quian-quiroga-neuroscience-fiction</link>
                                <description>
                                            <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/04/twitter-quiroga-01.jpg" alt="" class="wp-image-986" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1950665054/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1950665054&amp;linkId=1ebd0a27275252e68cd820669f7aac83" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/04/neuroscience-fiction-789x1184-2-682x1024.jpg" alt="" class="wp-image-988" width="171" height="256" /></a></div>



<p>Rodrigo and I discuss concept cells and his latest book, NeuroScience Fiction. The book is a whirlwind of many of the big questions in neuroscience, each one framed by one of Rodrigo’s favorite science fiction films and buttressed by tons of history, literature, and philosophy. We discuss a few of the topics in the book, like AI, identity, free will, consciousness, and immortality, and we keep returning to concept cells and the role of abstraction in human cognition.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/04/headshot.jpeg" alt="" class="wp-image-989" /></div>



<p>Notes:</p>



<ul><li>Rodrigo's lab website: <a href="https://www2.le.ac.uk/centres/csn">Centre for Systems Neuroscience</a> at the University of Leicester, UK</li><li>His book:<ul><li><a href="https://www.amazon.com/gp/product/1950665054/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1950665054&amp;linkId=1ebd0a27275252e68cd820669f7aac83" target="_blank" rel="noreferrer noopener">NeuroScience Fiction: From "2001: A Space Odyssey" to "Inception," How Neuroscience Is Transforming Sci-Fi into Reality―While Challenging Our Beliefs About the Mind, Machines, and What Makes us Human.</a></li></ul></li><li>Papers we discuss or mention:<ul><li><a href="https://www2.le.ac.uk/departments/engineering/research/bioengineering/neuroengineering-lab/Publications/NRN_RQQ_2012.pdf">Concept cells: the building blocks of declarative memory functions.</a></li><li><a href="https://science.sciencemag.org/content/363/6434/1388">Neural representations across species.</a></li><li><a href="https://doi.org/10.1016/j.cub.2020.03.004">Searching for the neural correlates of human intelligence.</a></li></ul></li><li>Talks:<ul><li>Concept cells and their role in memory - <a href="https://www.youtube.com/watch?v=Y1ID0FQN9tg&amp;feature=youtu.be">Part 1</a> and <a href="https://www.youtube.com/watch?v=TRfV8yhTudY">Part 2</a></li></ul></li></ul>
]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[












Rodrigo and I discuss concept cells and his latest book, NeuroScience Fiction. The book is a whirlwind of many of the big questions in neuroscience, each one framed by one of Rodrigo’s favorite science fiction films and buttressed by tons of history, literature, and philosophy. We discuss a few of the topics in the book, like AI, identity, free will, consciousness, and immortality, and we keep returning to concept cells and the role of abstraction in human cognition.







Notes:



Rodrigo's lab website: Centre for Systems Neuroscience at the University of Leicester, UKHis book:NeuroScience Fiction: From "2001: A Space Odyssey" to "Inception," How Neuroscience Is Transforming Sci-Fi into Reality―While Challenging Our Beliefs About the Mind, Machines, and What Makes us Human.Papers we discuss or mention:Concept cells: the building blocks of declarative memory functions.Neural representations across species.Searching for the neural correlates of human intelligence.Talks:Concept cells and their role in memory - Part 1 and Part 2
]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 068 Rodrigo Quian Quiroga: NeuroScience Fiction]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[
<img src="https://braininspired.co/wp-content/uploads/2020/04/twitter-quiroga-01.jpg" alt="" class="wp-image-986" />



<div class="wp-block-image"><a href="https://www.patreon.com/braininspired" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2018/10/become_a_patron_button@2x.png" alt="" class="wp-image-585" /></a></div>



<div class="wp-block-image"><a href="https://www.amazon.com/gp/product/1950665054/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1950665054&amp;linkId=1ebd0a27275252e68cd820669f7aac83" target="_blank" rel="noreferrer noopener"><img src="https://braininspired.co/wp-content/uploads/2020/04/neuroscience-fiction-789x1184-2-682x1024.jpg" alt="" class="wp-image-988" width="171" height="256" /></a></div>



<p>Rodrigo and I discuss concept cells and his latest book, NeuroScience Fiction. The book is a whirlwind of many of the big questions in neuroscience, each one framed by one of Rodrigo’s favorite science fiction films and buttressed by tons of history, literature, and philosophy. We discuss a few of the topics in the book, like AI, identity, free will, consciousness, and immortality, and we keep returning to concept cells and the role of abstraction in human cognition.</p>



<div class="wp-block-image"><img src="https://braininspired.co/wp-content/uploads/2020/04/headshot.jpeg" alt="" class="wp-image-989" /></div>



<p>Notes:</p>



<ul><li>Rodrigo's lab website: <a href="https://www2.le.ac.uk/centres/csn">Centre for Systems Neuroscience</a> at the University of Leicester, UK</li><li>His book:<ul><li><a href="https://www.amazon.com/gp/product/1950665054/ref=as_li_qf_asin_il_tl?ie=UTF8&amp;tag=pmiddlebroo09-20&amp;creative=9325&amp;linkCode=as2&amp;creativeASIN=1950665054&amp;linkId=1ebd0a27275252e68cd820669f7aac83" target="_blank" rel="noreferrer noopener">NeuroScience Fiction: From "2001: A Space Odyssey" to "Inception," How Neuroscience Is Transforming Sci-Fi into Reality―While Challenging Our Beliefs About the Mind, Machines, and What Makes us Human.</a></li></ul></li><li>Papers we discuss or mention:<ul><li><a href="https://www2.le.ac.uk/departments/engineering/research/bioengineering/neuroengineering-lab/Publications/NRN_RQQ_2012.pdf">Concept cells: the building blocks of declarative memory functions.</a></li><li><a href="https://science.sciencemag.org/content/363/6434/1388">Neural representations across species.</a></li><li><a href="https://doi.org/10.1016/j.cub.2020.03.004">Searching for the neural correlates of human intelligence.</a></li></ul></li><li>Talks:<ul><li>Concept cells and their role in memory - <a href="https://www.youtube.com/watch?v=Y1ID0FQN9tg&amp;feature=youtu.be">Part 1</a> and <a href="https://www.youtube.com/watch?v=TRfV8yhTudY">Part 2</a></li></ul></li></ul>
]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/068-Rodrigo-Quian-Quiroga.mp3" length="91080614"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[












Rodrigo and I discuss concept cells and his latest book, NeuroScience Fiction. The book is a whirlwind of many of the big questions in neuroscience, each one framed by one of Rodrigo’s favorite science fiction films and buttressed by tons of history, literature, and philosophy. We discuss a few of the topics in the book, like AI, identity, free will, consciousness, and immortality, and we keep returning to concept cells and the role of abstraction in human cognition.







Notes:



Rodrigo's lab website: Centre for Systems Neuroscience at the University of Leicester, UKHis book:NeuroScience Fiction: From "2001: A Space Odyssey" to "Inception," How Neuroscience Is Transforming Sci-Fi into Reality―While Challenging Our Beliefs About the Mind, Machines, and What Makes us Human.Papers we discuss or mention:Concept cells: the building blocks of declarative memory functions.Neural representations across species.Searching for the neural correlates of human intelligence.Talks:Concept cells and their role in memory - Part 1 and Part 2
]]>
                </itunes:summary>
                                    <itunes:image href="https://episodes.castos.com/braininspired/images/twitter-quiroga-01.jpg"></itunes:image>
                                                                            <itunes:duration>01:34:44</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
                    <item>
                <title>
                    <![CDATA[BI 000 Introduction]]>
                </title>
                <pubDate>Mon, 23 Jul 2018 23:52:56 +0000</pubDate>
                <dc:creator>Paul Middlebrooks</dc:creator>
                <guid isPermaLink="true">https://brain-inspired.castos.com/podcasts/330/episodes/bi-000-introduction</guid>
                                    <link>https://brain-inspired.castos.com/episodes/bi-000-introduction</link>
                                <description>
                                            <![CDATA[]]>
                                    </description>
                <itunes:subtitle>
                    <![CDATA[]]>
                </itunes:subtitle>
                                <itunes:title>
                    <![CDATA[BI 000 Introduction]]>
                </itunes:title>
                                                <itunes:explicit>false</itunes:explicit>
                <content:encoded>
                    <![CDATA[]]>
                </content:encoded>
                                    <enclosure url="https://episodes.castos.com/braininspired/BI-000-Introduction-1.mp3" length="0"
                        type="audio/mpeg">
                    </enclosure>
                                <itunes:summary>
                    <![CDATA[]]>
                </itunes:summary>
                                                                            <itunes:duration>00:04:28</itunes:duration>
                                                    <itunes:author>
                    <![CDATA[Paul Middlebrooks]]>
                </itunes:author>
                            </item>
            </channel>
</rss>
