Event JSON
{
"id": "d148091049cd147853314251bd16403c20c1de157fde1bacca4bc18edc630bfc",
"pubkey": "e7e59d140cd89529cbd6f8941bfa28fbd9147d84e0e575755c209784781464cc",
"created_at": 1716503367,
"kind": 1,
"tags": [
[
"p",
"7e044664c97ea1edd0fa8a8d33b5b6a0734e8ece3b126807c013a21cae667bcb"
],
[
"proxy",
"https://arvr.social/@mpesce/112492764691090999",
"web"
],
[
"proxy",
"https://arvr.social/users/mpesce/statuses/112492764691090999",
"activitypub"
],
[
"L",
"pink.momostr"
],
[
"l",
"pink.momostr.activitypub:https://arvr.social/users/mpesce/statuses/112492764691090999",
"pink.momostr"
]
],
"content": "Now it can be told:\n\nWhile doing some AI engineering work for a client, I developed a prompt - completely inadvertently - that reduced every AI chatbot to gibberish (except Anthropic's Claude 3). I then spent a week trying to alert the LLM vendors to this issue - and largely failed. There is _no_ mechanism to report flaws in these models that are already deployed to billions of users. Read the whole story in nostr:npub10czyvexf06s7m58632xn8ddk5pe5arkw8vfxsp7qzw3petnx009saf4lpy \n\nhttps://www.theregister.com/2024/05/23/ai_untested_unstable/",
"sig": "3a80b2797252d8d6df4a0e275042cb4af32b366c521bc7879db08db793f7788b7ab977575ea0f199b88ece8892847e09e81c9e3aa1d88e64a4d02c7e525d8ee3"
}
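
The "id" and "sig" fields above are not arbitrary: per NIP-01, the id is the SHA-256 hash of a canonical serialization of the event's other fields, so any event like this one can be checked for internal consistency offline. A minimal sketch in Python, stdlib only (the function name compute_event_id is mine, not a library API):

import hashlib
import json

def compute_event_id(event: dict) -> str:
    """Recompute the NIP-01 event id from the event's signed fields."""
    # NIP-01 serializes the event as a six-element JSON array with no
    # whitespace: [0, pubkey, created_at, kind, tags, content].
    serialized = json.dumps(
        [
            0,
            event["pubkey"],
            event["created_at"],
            event["kind"],
            event["tags"],
            event["content"],
        ],
        separators=(",", ":"),  # compact form, no spaces or line breaks
        ensure_ascii=False,     # keep UTF-8 characters unescaped
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Passing the event above to compute_event_id should reproduce its "id".
# The "sig" is a BIP-340 Schnorr signature over that 32-byte id, made with
# the key behind "pubkey"; verifying it needs a secp256k1 Schnorr library.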