Wired Security RSS on Nostr: A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions
Published at
2024-09-14 13:34:24

Event JSON
{
"id": "e39c5d3ca7756eefa0ad447e42b7c8f511f68db721d45749808697d799a181f1",
"pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
"created_at": 1726320864,
"kind": 1,
"tags": [
[
"t",
"cybersecurity"
],
[
"t",
"security"
],
[
"t",
"infosec"
]
],
"content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\nhttps://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
"sig": "4c5275d09c7b834bdaba084e94446ab048e48a44f5499798907fbb727ec6932140c5d3d718c3385112a8b25dcd516fe50541c49ed1a128c6a4d5eba58c7e5afc"
}