Wired Security RSS on Nostr: A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions
Published at 2024-09-14 14:55:15
Event JSON
{
  "id": "7efb00bd34ccf4b1bf835909e2859c056a1f28902f5ef4c18c153265ebdbd2bb",
  "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
  "created_at": 1726325715,
  "kind": 1,
  "tags": [
    [
      "t",
      "cybersecurity"
    ],
    [
      "t",
      "security"
    ],
    [
      "t",
      "infosec"
    ]
  ],
  "content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\nhttps://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
  "sig": "6081a3eb036f9b406db8f2fd4acf46a5e5151f5da1e9a6619d1aa78da77e504226980614e80bf63d17680ff3c3de6f30ac8a7a9fc38423ccc0ace3204ada1908"
}