Event JSON
{
  "id": "b3185004d9739d1bdf141874eeb2300e6745638ec13b113cd92f4ad7135172cf",
  "pubkey": "4315a187e024818492e61938093ba683dae66624d202cd43738de5b8ba198c0f",
  "created_at": 1718499435,
  "kind": 1,
  "tags": [
    [
      "proxy",
      "https://fedi.simonwillison.net/@simon/112623579000040340",
      "web"
    ],
    [
      "proxy",
      "https://fedi.simonwillison.net/users/simon/statuses/112623579000040340",
      "activitypub"
    ],
    [
      "L",
      "pink.momostr"
    ],
    [
      "l",
      "pink.momostr.activitypub:https://fedi.simonwillison.net/users/simon/statuses/112623579000040340",
      "pink.momostr"
    ]
  ],
  "content": "The 6th example I've seen of the same prompt injection attack against LLM chatbots: https://embracethered.com/blog/posts/2024/github-copilot-chat-prompt-injection-data-exfiltration/\n\nThe attack involves tricking an LLM chatbot with access to both private and untrusted data to embed a Markdown image with a URL to an attacker's server where that URL leaks private data extracted from the session.\n\nWe've now seen this same attack in ChatGPT itself, Google Bard, Writer.com, Amazon Q and Google NotebookLM (all now fixed, thankfully).\n\nMy collection: https://simonwillison.net/tags/markdownexfiltration/",
  "sig": "5bb50b0141c627e4863fefb331e7795d1a20f1f1e46a27d670e5f0d742e23fff732f52878e71f0f945cc360ef649de65845435df394f1b5d2d339463cb3dd734"
}