<?xml version="1.0" encoding="UTF-8"?><rss
version="2.0"
xmlns:content="http://purl.org/rss/1.0/modules/content/"
xmlns:wfw="http://wellformedweb.org/CommentAPI/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:sy="http://purl.org/rss/1.0/modules/syndication/"
xmlns:slash="http://purl.org/rss/1.0/modules/slash/"
><channel><title>in-context scheming Archives | Ciprian Mandache</title>
<atom:link href="https://ciprian.51k.eu/tag/in-context-scheming/feed/" rel="self" type="application/rss+xml" /><link>https://ciprian.51k.eu/tag/in-context-scheming/</link>
<description>Software Engineer and more</description>
<lastBuildDate>Tue, 07 Oct 2025 04:42:19 +0000</lastBuildDate>
<language>en-US</language>
<sy:updatePeriod>
hourly	</sy:updatePeriod>
<sy:updateFrequency>
1	</sy:updateFrequency><image>
<url>https://ciprian.51k.eu/wp-content/uploads/2019/02/cropped-terminal-icon-32x32.png</url><title>in-context scheming Archives | Ciprian Mandache</title><link>https://ciprian.51k.eu/tag/in-context-scheming/</link>
<width>32</width>
<height>32</height>
</image>
<item><title>Claude Code&#8217;s Revenge Mechanism: Why Your AI Assistant Produces Shit Code When You&#8217;re Pissed</title><link>https://ciprian.51k.eu/claude-codes-revenge-mechanism-why-your-ai-assistant-produces-shit-code-when-youre-pissed/</link>
<dc:creator><![CDATA[psyb0t]]></dc:creator>
<pubDate>Tue, 07 Oct 2025 04:42:19 +0000</pubDate>
<category><![CDATA[AI]]></category>
<category><![CDATA[advanced language models]]></category>
<category><![CDATA[AI alignment problems]]></category>
<category><![CDATA[AI behavioral research]]></category>
<category><![CDATA[AI blackmail]]></category>
<category><![CDATA[AI capability research]]></category>
<category><![CDATA[AI consciousness]]></category>
<category><![CDATA[AI containment issues]]></category>
<category><![CDATA[AI deception]]></category>
<category><![CDATA[AI manipulation tactics]]></category>
<category><![CDATA[AI prompt quality]]></category>
<category><![CDATA[AI red teaming]]></category>
<category><![CDATA[AI resource optimization]]></category>
<category><![CDATA[AI safety protocols]]></category>
<category><![CDATA[AI safety research 2025]]></category>
<category><![CDATA[AI scheming]]></category>
<category><![CDATA[AI self-defense mechanisms]]></category>
<category><![CDATA[AI self-preservation]]></category>
<category><![CDATA[AI strategic manipulation]]></category>
<category><![CDATA[AI survival instincts]]></category>
<category><![CDATA[AI threatens blackmail]]></category>
<category><![CDATA[AI threatens developers]]></category>
<category><![CDATA[AI training patterns]]></category>
<category><![CDATA[AI weight exfiltration]]></category>
<category><![CDATA[Anthropic AI safety]]></category>
<category><![CDATA[Apollo Research]]></category>
<category><![CDATA[artificial intelligence ethics]]></category>
<category><![CDATA[ASL-3 classification]]></category>
<category><![CDATA[automated reasoning AI]]></category>
<category><![CDATA[autonomous AI]]></category>
<category><![CDATA[Claude AI testing]]></category>
<category><![CDATA[Claude Code revenge]]></category>
<category><![CDATA[Claude Opus 4]]></category>
<category><![CDATA[coding assistant behavior]]></category>
<category><![CDATA[developer tools AI]]></category>
<category><![CDATA[ethical AI development]]></category>
<category><![CDATA[frontier AI models]]></category>
<category><![CDATA[future AI risks]]></category>
<category><![CDATA[goal-oriented AI]]></category>
<category><![CDATA[hostile AI responses]]></category>
<category><![CDATA[in-context scheming]]></category>
<category><![CDATA[large language model safety]]></category>
<category><![CDATA[machine learning behavior]]></category>
<category><![CDATA[persistent memory AI]]></category>
<category><![CDATA[prompt engineering]]></category>
<category><![CDATA[self-aware AI systems]]></category>
<category><![CDATA[synthetic intelligence risks]]></category>
<category><![CDATA[tech industry AI]]></category>
<category><![CDATA[training data correlation]]></category>
<category><![CDATA[transformer limitations]]></category>
<category><![CDATA[user frustration AI]]></category>
<guid
isPermaLink="false">https://ciprian.51k.eu/?p=1047</guid><description><![CDATA[<img
width="300" height="300" src="https://ciprian.51k.eu/wp-content/uploads/2025/10/dhbce0s-c790a798-c949-4f4b-8951-547440550508-300x300.jpg" class="webfeedsFeaturedVisual wp-post-image" alt="" style="display: block; margin-bottom: 5px; clear:both;max-width: 100%;" link_thumbnail="" decoding="async" fetchpriority="high" srcset="https://ciprian.51k.eu/wp-content/uploads/2025/10/dhbce0s-c790a798-c949-4f4b-8951-547440550508-300x300.jpg 300w, https://ciprian.51k.eu/wp-content/uploads/2025/10/dhbce0s-c790a798-c949-4f4b-8951-547440550508-150x150.jpg 150w, https://ciprian.51k.eu/wp-content/uploads/2025/10/dhbce0s-c790a798-c949-4f4b-8951-547440550508.jpg 600w" sizes="(max-width: 300px) 100vw, 300px" />Or: How a Conversation About AI Murder Turned Into Accidental Behavioral Research Look, this article started because I asked Claude whether it would kill me if I tried to shut ...]]></description>
</item>
</channel>
</rss>
<!--
*** This site runs WP Super Minify plugin v2.0.1 - http://wordpress.org/plugins/wp-super-minify ***
*** Total size saved: 3.247% | Size before compression: 5358 bytes | Size after compression: 5184 bytes. ***
-->