Wired Security RSS on Nostr: A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions
Published at
2024-09-14 15:25:36

Event JSON
{
  "id": "3a1e15bb34d124bbf852877d2e0b8e81fafe4632f4dec31a71897b89b6d04602",
  "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
  "created_at": 1726327536,
  "kind": 1,
  "tags": [
    ["t", "cybersecurity"],
    ["t", "security"],
    ["t", "infosec"]
  ],
  "content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\nhttps://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
  "sig": "8de766c7e4e0ba625e35c5688ed41ca1a8eea735733f158653263ebaaeaddeeae53fe7a419f7946b04517c9d24aac249bf5c6bf364b913ccf5863271f03917c4"
}
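
For reference, a minimal sketch of how the "id" field above can be recomputed, assuming NIP-01's canonical serialization of [0, pubkey, created_at, kind, tags, content] and only Python's standard hashlib and json modules; verifying "sig" would additionally require a Schnorr signature check over secp256k1 against "pubkey", which is omitted here.

import hashlib
import json

# Event fields as published above.
event = {
    "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
    "created_at": 1726327536,
    "kind": 1,
    "tags": [["t", "cybersecurity"], ["t", "security"], ["t", "infosec"]],
    "content": (
        "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\n"
        "https://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/"
    ),
}

# NIP-01 canonical form: a JSON array in this exact field order,
# UTF-8 encoded, with no extra whitespace.
serialized = json.dumps(
    [0, event["pubkey"], event["created_at"], event["kind"], event["tags"], event["content"]],
    separators=(",", ":"),
    ensure_ascii=False,
)

# The event id is the lowercase hex SHA-256 of that serialization.
event_id = hashlib.sha256(serialized.encode("utf-8")).hexdigest()
print(event_id)
# If the escaping rules line up exactly, this should equal the published "id":
# 3a1e15bb34d124bbf852877d2e0b8e81fafe4632f4dec31a71897b89b6d04602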