<!-- 
RSS generated by JIRA (8.20.10#820010-sha1:ace47f9899e9ee25d7157d59aa17ab06aee30d3d) at Wed Feb 07 19:55:40 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>OpenDaylight JIRA</title>
    <link>https://jira.opendaylight.org</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>8.20.10</version>
        <build-number>820010</build-number>
        <build-date>22-06-2022</build-date>
    </build-info>


<item>
            <title>[CONTROLLER-1487] entity structures are kept even when the entity is removed.  can be used as DOS attack</title>
                <link>https://jira.opendaylight.org/browse/CONTROLLER-1487</link>
                <project id="10113" key="CONTROLLER">controller</project>
                    <description>&lt;p&gt;when a node connects to ovsdb southbound, there is an entity-owner node&lt;br/&gt;
created.  for single node, it looks as below.  However, when the device&lt;br/&gt;
disconnects, it still remains.  It does correctly reflect that there is no&lt;br/&gt;
ownership, but it should be removed entirely.&lt;/p&gt;

&lt;p&gt;when node connected:&lt;/p&gt;

&lt;p&gt;    &quot;entity-owners&quot;: {&lt;br/&gt;
        &quot;entity-type&quot;: [&lt;br/&gt;
            {&lt;br/&gt;
                &quot;entity&quot;: [&lt;br/&gt;
                    {&lt;br/&gt;
                        &quot;candidate&quot;: [&lt;/p&gt;
                            {
                                &quot;name&quot;: &quot;member-1&quot;
                            }
&lt;p&gt;                        ],&lt;br/&gt;
                        &quot;id&quot;: &quot;/network-topology:network-topology/network-topology:topology&lt;span class=&quot;error&quot;&gt;&amp;#91;network-topology:topology-id=&amp;#39;ovsdb:1&amp;#39;&amp;#93;&lt;/span&gt;/network-topology:node&lt;span class=&quot;error&quot;&gt;&amp;#91;network-topology:node-id=&amp;#39;ovsdb://uuid/bc330598-4581-4d9f-b932-e362b452137b&amp;#39;&amp;#93;&lt;/span&gt;&quot;,&lt;br/&gt;
                        &quot;owner&quot;: &quot;member-1&quot;&lt;br/&gt;
                    }&lt;br/&gt;
                ],&lt;br/&gt;
                &quot;type&quot;: &quot;ovsdb&quot;&lt;br/&gt;
            },&lt;br/&gt;
            {&lt;br/&gt;
                &quot;entity&quot;: [&lt;br/&gt;
                    {&lt;br/&gt;
                        &quot;candidate&quot;: [&lt;/p&gt;
                            {
                                &quot;name&quot;: &quot;member-1&quot;
                            }
&lt;p&gt;                        ],&lt;br/&gt;
                        &quot;id&quot;: &quot;/general-entity:entity&lt;span class=&quot;error&quot;&gt;&amp;#91;general-entity:name=&amp;#39;ovsdb-southbound-provider&amp;#39;&amp;#93;&lt;/span&gt;&quot;,&lt;br/&gt;
                        &quot;owner&quot;: &quot;member-1&quot;&lt;br/&gt;
                    }&lt;br/&gt;
                ],&lt;br/&gt;
                &quot;type&quot;: &quot;ovsdb-southbound-provider&quot;&lt;br/&gt;
            }&lt;br/&gt;
        ]&lt;br/&gt;
    }&lt;br/&gt;
}&lt;/p&gt;


&lt;p&gt;when node disconnected:&lt;/p&gt;


&lt;p&gt;    &quot;entity-owners&quot;: {&lt;br/&gt;
        &quot;entity-type&quot;: [&lt;br/&gt;
            {&lt;br/&gt;
                &quot;entity&quot;: [&lt;/p&gt;
                    {
                        &quot;id&quot;: &quot;/network-topology:network-topology/network-topology:topology[network-topology:topology-id=&apos;ovsdb:1&apos;]/network-topology:node[network-topology:node-id=&apos;ovsdb://uuid/bc330598-4581-4d9f-b932-e362b452137b&apos;]&quot;,
                        &quot;owner&quot;: &quot;&quot;
                    }
&lt;p&gt;                ],&lt;br/&gt;
                &quot;type&quot;: &quot;ovsdb&quot;&lt;br/&gt;
            },&lt;br/&gt;
            {&lt;br/&gt;
                &quot;entity&quot;: [&lt;br/&gt;
                    {&lt;br/&gt;
                        &quot;candidate&quot;: [&lt;/p&gt;
                            {
                                &quot;name&quot;: &quot;member-1&quot;
                            }
&lt;p&gt;                        ],&lt;br/&gt;
                        &quot;id&quot;: &quot;/general-entity:entity&lt;span class=&quot;error&quot;&gt;&amp;#91;general-entity:name=&amp;#39;ovsdb-southbound-provider&amp;#39;&amp;#93;&lt;/span&gt;&quot;,&lt;br/&gt;
                        &quot;owner&quot;: &quot;member-1&quot;&lt;br/&gt;
                    }&lt;br/&gt;
                ],&lt;br/&gt;
                &quot;type&quot;: &quot;ovsdb-southbound-provider&quot;&lt;br/&gt;
            }&lt;br/&gt;
        ]&lt;br/&gt;
    }&lt;br/&gt;
}&lt;/p&gt;</description>
                <environment>&lt;p&gt;Operating System: All&lt;br/&gt;
Platform: All&lt;/p&gt;</environment>
        <key id="26041">CONTROLLER-1487</key>
            <summary>entity structures are kept even when the entity is removed.  can be used as DOS attack</summary>
                <type id="10104" iconUrl="https://jira.opendaylight.org/secure/viewavatar?size=xsmall&amp;avatarId=10303&amp;avatarType=issuetype">Bug</type>
                                                <status id="5" iconUrl="https://jira.opendaylight.org/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="green"/>
                                    <resolution id="10001">Won&apos;t Do</resolution>
                                        <assignee username="-1">Unassigned</assignee>
                                    <reporter username="jluhrsen">Jamo Luhrsen</reporter>
                        <labels>
                    </labels>
                <created>Sat, 20 Feb 2016 18:09:41 +0000</created>
                <updated>Tue, 25 Jul 2023 08:24:07 +0000</updated>
                            <resolved>Tue, 22 May 2018 10:46:56 +0000</resolved>
                                                                    <component>clustering</component>
                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                                                                <comments>
                            <comment id="51295" author="vishnoianil@gmail.com" created="Sun, 21 Feb 2016 07:37:46 +0000"  >&lt;p&gt;Hi Jamo,&lt;/p&gt;

&lt;p&gt;As per the discussion on following thread, this is expected behavior. &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://lists.opendaylight.org/pipermail/integration-dev/2016-February/005957.html&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://lists.opendaylight.org/pipermail/integration-dev/2016-February/005957.html&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="51296" author="jluhrsen" created="Tue, 23 Feb 2016 01:35:22 +0000"  >&lt;p&gt;the root issue here is that entity owner service (EOS) does not clean up the entry when it goes &quot;candidateless&quot;.  So in the case when a lot of new/unique&lt;br/&gt;
entities are learned and removed we will continue to use resources and&lt;br/&gt;
if repeated there will be an OutOfMemory exception and crash.&lt;/p&gt;

&lt;p&gt;one quick way to see this is to start an ovs bridge:&lt;/p&gt;

&lt;p&gt;sudo ovs-vsctl add-br memLeakerBridge&lt;/p&gt;

&lt;p&gt;connect it to openflowplugin:&lt;/p&gt;

&lt;p&gt;sudo ovs-vsctl set-controller memLeakerBridge tcp:${ODL_IP}:6633&lt;/p&gt;

&lt;p&gt;(NOTE: feature installed is openflowplugin-flow-services-rest)&lt;/p&gt;

&lt;p&gt;cycle through mac addresses:&lt;/p&gt;

&lt;p&gt;sudo ovs-vsctl set bridge memLeakerBridge01 other-config:hwaddr=00:00:00:00:00:01&lt;br/&gt;
sudo ovs-vsctl set bridge memLeakerBridge01 other-config:hwaddr=00:00:00:00:00:02&lt;br/&gt;
sudo ovs-vsctl set bridge memLeakerBridge01 other-config:hwaddr=00:00:00:00:00:03&lt;br/&gt;
...  and so on&lt;/p&gt;




&lt;p&gt;here&apos;s a hack of a python script to do the cycling:&lt;/p&gt;

&lt;p&gt;for i in xrange(0x00, 0xFF):&lt;br/&gt;
   for j in xrange(0x00,0xFF):&lt;br/&gt;
     for k in xrange(0x00,0xFF):&lt;br/&gt;
       print(cmd)&lt;br/&gt;
       cmd = &quot;sudo ovs-vsctl set bridge memLeakerBridge01 other-config:hwaddr=&quot; + format(i, &apos;x&apos;) + &quot;:&quot; + format(j, &apos;x&apos;) + &quot;:&quot; + format(k, &apos;x&apos;)&lt;br/&gt;
       time.sleep(2)  # without this pause, it doesn&apos;t work.  I did not investigate&lt;/p&gt;</comment>
                            <comment id="51297" author="rovarga" created="Wed, 24 Feb 2016 16:01:38 +0000"  >&lt;p&gt;As indicated, entities do not have a complete lifecycle (as usual, removal is missing) and this is a bug.&lt;/p&gt;</comment>
                            <comment id="51298" author="tpantelis" created="Wed, 24 Feb 2016 16:37:03 +0000"  >&lt;p&gt;I&apos;ve prototyped it but I&apos;m afraid that removing an entity when &quot;candidateless&quot; will introduce latent timing bugs. I&apos;m seeing timing-related failures even in unit tests. Even if we delay removal via timer (say 15 sec) and recheck that there are no current candidates, there may be a candidate add transaction inflight, in which case the delete would remove it afterwards. I don&apos;t see any way to alleviate this potential issue with the way the in-memory data tree works.&lt;/p&gt;

&lt;p&gt;I&apos;m inclined to leave it as is. The memory footprint for an empty entity node is pretty small so it would likely take millions to run OOM (depending on how much memory is allocated) and, at least with current use cases, complete removal of an entity should be infrequent.&lt;/p&gt;

&lt;p&gt;Wrt to DOS attack, any config yang list is a potential DOS attack. Eg, one could easily keep putting nodes to the inventory node list via restconf and run it OOM.&lt;/p&gt;</comment>
                            <comment id="51299" author="jluhrsen" created="Wed, 24 Feb 2016 16:57:39 +0000"  >&lt;p&gt;(In reply to Tom Pantelis from comment #4)&lt;br/&gt;
&amp;gt; I&apos;ve prototyped it but I&apos;m afraid that removing an entity when&lt;br/&gt;
&amp;gt; &quot;candidateless&quot; will introduce latent timing bugs. I&apos;m seeing timing-related&lt;br/&gt;
&amp;gt; failures even in unit tests. Even if we delay removal via timer (say 15 sec)&lt;br/&gt;
&amp;gt; and recheck that there are no current candidates, there may be a candidate&lt;br/&gt;
&amp;gt; add transaction inflight, in which case the delete would remove it&lt;br/&gt;
&amp;gt; afterwards. I don&apos;t see any way to alleviate this potential issue with the&lt;br/&gt;
&amp;gt; way the in-memory data tree works.&lt;br/&gt;
&amp;gt; &lt;br/&gt;
&amp;gt; I&apos;m inclined to leave it as is. The memory footprint for an empty entity&lt;br/&gt;
&amp;gt; node is pretty small so it would likely take millions to run OOM (depending&lt;br/&gt;
&amp;gt; on how much memory is allocated) and, at least with current use cases,&lt;br/&gt;
&amp;gt; complete removal of an entity should be infrequent.&lt;/p&gt;


&lt;p&gt;I did not specifically keep track of the count, but I think I ran it OOM&lt;br/&gt;
with ovsdb entries numbering in the thousands and maybe slightly less&lt;br/&gt;
when doing it with openflow entries.  If it&apos;s important to get a specific&lt;br/&gt;
number, I can.  Just wanted to give my observation since it seemed like&lt;br/&gt;
a lot less than millions.&lt;/p&gt;


&lt;p&gt;&amp;gt; Wrt to DOS attack, any config yang list is a potential DOS attack. Eg, one&lt;br/&gt;
&amp;gt; could easily keep putting nodes to the inventory node list via restconf and&lt;br/&gt;
&amp;gt; run it OOM.&lt;/p&gt;

&lt;p&gt;fair point, but at least in this specific case we can also issue a &lt;br/&gt;
delete via restconf.  I&apos;m not sure the right way to get rid of the &lt;br/&gt;
stale entities, except by restarting.&lt;/p&gt;</comment>
                            <comment id="51300" author="tpantelis" created="Wed, 24 Feb 2016 18:53:48 +0000"  >&lt;p&gt;The number will depend on how much memory you allocate to the JVM - I think the default is 2G so thousands would probably do it. But the same issue would occur if you actually had thousands of valid entities.&lt;/p&gt;

&lt;p&gt;Also doesn&apos;t OVSDB/OF leave the inventory node for a bit of time before purging? If so that&apos;s accounting for memory as well and will be affected by your blaster script.&lt;/p&gt;

&lt;p&gt;If you run OOM, the process would likely be hosed and you likely wouldn&apos;t be able to issue a delete &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.opendaylight.org/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt; And on restart you would likely run OOM again when it restored from persistence, unless you bumped the memory. At least with entities they don&apos;t come back on restart since they&apos;re operational &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.opendaylight.org/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;

&lt;p&gt;I&apos;m not saying we shouldn&apos;t remove the entries - I just don&apos;t see a safe/foolproof way to do it automatically w/o introducing potential race conditions that would result in difficult sporadic bugs to track down. Definitely with removing them immediately. Using a timer would be safer and reduce the chance for a race condition. How long is safe? Probably some minutes to hours we could assume the entity won&apos;t come back, although it would require more memory for the bookkeeping to track which ones are eligible to purge. But that still wouldn&apos;t stop a DOS attack like your script which would hammer it in seconds.&lt;/p&gt;

&lt;p&gt;(In reply to Jamo Luhrsen from comment #5)&lt;br/&gt;
&amp;gt; (In reply to Tom Pantelis from comment #4)&lt;br/&gt;
&amp;gt; &amp;gt; I&apos;ve prototyped it but I&apos;m afraid that removing an entity when&lt;br/&gt;
&amp;gt; &amp;gt; &quot;candidateless&quot; will introduce latent timing bugs. I&apos;m seeing timing-related&lt;br/&gt;
&amp;gt; &amp;gt; failures even in unit tests. Even if we delay removal via timer (say 15 sec)&lt;br/&gt;
&amp;gt; &amp;gt; and recheck that there are no current candidates, there may be a candidate&lt;br/&gt;
&amp;gt; &amp;gt; add transaction inflight, in which case the delete would remove it&lt;br/&gt;
&amp;gt; &amp;gt; afterwards. I don&apos;t see any way to alleviate this potential issue with the&lt;br/&gt;
&amp;gt; &amp;gt; way the in-memory data tree works.&lt;br/&gt;
&amp;gt; &amp;gt; &lt;br/&gt;
&amp;gt; &amp;gt; I&apos;m inclined to leave it as is. The memory footprint for an empty entity&lt;br/&gt;
&amp;gt; &amp;gt; node is pretty small so it would likely take millions to run OOM (depending&lt;br/&gt;
&amp;gt; &amp;gt; on how much memory is allocated) and, at least with current use cases,&lt;br/&gt;
&amp;gt; &amp;gt; complete removal of an entity should be infrequent.&lt;br/&gt;
&amp;gt; &lt;br/&gt;
&amp;gt; &lt;br/&gt;
&amp;gt; I did not specifically keep track of the count, but I think I ran it OOM&lt;br/&gt;
&amp;gt; with ovsdb entries numbering in the thousands and maybe slightly less&lt;br/&gt;
&amp;gt; when doing it with openflow entries.  If it&apos;s important to get a specific&lt;br/&gt;
&amp;gt; number, I can.  Just wanted to give my observation since it seemed like&lt;br/&gt;
&amp;gt; a lot less than millions.&lt;br/&gt;
&amp;gt; &lt;br/&gt;
&amp;gt;  &lt;br/&gt;
&amp;gt; &amp;gt; Wrt to DOS attack, any config yang list is a potential DOS attack. Eg, one&lt;br/&gt;
&amp;gt; &amp;gt; could easily keep putting nodes to the inventory node list via restconf and&lt;br/&gt;
&amp;gt; &amp;gt; run it OOM.&lt;br/&gt;
&amp;gt; &lt;br/&gt;
&amp;gt; fair point, but at least in this specific case we can also issue a &lt;br/&gt;
&amp;gt; delete via restconf.  I&apos;m not sure the right way to get rid of the &lt;br/&gt;
&amp;gt; stale entities, except by restarting.&lt;/p&gt;</comment>
                            <comment id="51301" author="rovarga" created="Wed, 24 Feb 2016 20:04:47 +0000"  >&lt;p&gt;I do not pretend to understand the EOS implementation, but EntityOwnershipShard is based on Shard, hence it inherently contains and internal DataTree and can control what data is inside that.&lt;/p&gt;

&lt;p&gt;Since entities are stored in a list, the DataTree does not remove them automatically (as it does for-non-presence containers), but it is certainly in the realm of possibility for EntityOwnershipShard to make this list appear and disappear as candidates are added or deleted.&lt;/p&gt;

&lt;p&gt;This would require non-trivial amount of surgery, I suspect, probably increasing coupling between Shard and EntityOwnershipShard. Since we have splitting out EOS on our plate for Boron, I suggest we tackle this problem once the split is done, make that squeeky-clean and then consider a backport to Beryllium.&lt;/p&gt;</comment>
                            <comment id="51302" author="tpantelis" created="Wed, 24 Feb 2016 21:30:08 +0000"  >&lt;p&gt;The EntityOwnershipShard has full control over the data.&lt;/p&gt;

&lt;p&gt;I&apos;ll provide an example race condition:&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;node1 registers a candidate for entity1&lt;/li&gt;
	&lt;li&gt;sometime later node1 unregisters its candidate and submits a tx to remove it&lt;/li&gt;
	&lt;li&gt;the EOS leader commits the tx&lt;/li&gt;
	&lt;li&gt;the candidate list DTCL gets triggered and starts processing the change&lt;/li&gt;
	&lt;li&gt;in the meantime, node2 registers a candidate for entity1 and submits tx1 to add it&lt;/li&gt;
	&lt;li&gt;the candidate list DTCL finishes its processing and sends a CandidateRemoved message&lt;/li&gt;
	&lt;li&gt;the leader pre-commits tx1 and replicates&lt;/li&gt;
	&lt;li&gt;the leader receives the CandidateRemoved and sees that there are no more candidates&lt;br/&gt;
   and submits tx2 to remove entity1. However tx1 is in progress so tx2 is queued&lt;/li&gt;
	&lt;li&gt;consensus is received for tx1 and it is committed - node2 is now in the candidate&lt;br/&gt;
   list&lt;/li&gt;
	&lt;li&gt;however the leader then commits tx2 which deletes entity1&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;So we end up with a strange condition where a node thinks it has a candidate but the entity is gone. It would also try to write node2 as the owner but that would fail.&lt;/p&gt;

&lt;p&gt;This is why we didn&apos;t previously deal with entity removal. Maybe there&apos;s some trickiness we can do to alleviate this scenario with more EOS/Shard coupling as Robert mentioned. It&apos;s possible the EOS could hook into the can-commit/pre-commit (via Tony&apos;s new commit cohort stuff) and inspect the modification and abort an entity delete if it has a candidate.&lt;/p&gt;</comment>
                            <comment id="51303" author="sdn.arcnt@gmail.com" created="Thu, 11 Aug 2016 05:58:59 +0000"  >&lt;p&gt;(In reply to Anil Vishnoi from comment #9)&lt;br/&gt;
&amp;gt; *** &lt;a href=&quot;https://jira.opendaylight.org/browse/OPNFLWPLUG-618&quot; title=&quot;reading table feature property Warning when using openstack with ODL Cluster&quot; class=&quot;issue-link&quot; data-issue-key=&quot;OPNFLWPLUG-618&quot;&gt;&lt;del&gt;OPNFLWPLUG-618&lt;/del&gt;&lt;/a&gt; has been marked as a duplicate of this bug. ***&lt;/p&gt;

&lt;p&gt;hi,&lt;/p&gt;

&lt;p&gt;can you please confirm that &lt;a href=&quot;https://jira.opendaylight.org/browse/OPNFLWPLUG-618&quot; title=&quot;reading table feature property Warning when using openstack with ODL Cluster&quot; class=&quot;issue-link&quot; data-issue-key=&quot;OPNFLWPLUG-618&quot;&gt;&lt;del&gt;OPNFLWPLUG-618&lt;/del&gt;&lt;/a&gt; has been resolved or not.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10002">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="21989">OVSDB-297</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                            <customfield id="customfield_11400" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10208" key="com.atlassian.jira.plugin.system.customfieldtypes:textfield">
                        <customfieldname>External issue ID</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5397</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10201" key="com.atlassian.jira.plugin.system.customfieldtypes:url">
                        <customfieldname>External issue URL</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue><![CDATA[https://bugs.opendaylight.org/show_bug.cgi?id=5397]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10206" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Issue Type</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10300"><![CDATA[Bug]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10000" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>0|i02qvj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>