Wired Security RSS on Nostr: A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions
Published at
2024-09-14 16:36:23

Event JSON
{
"id": "ab12e49c4c6e3069a649c490106145fb777fe59c817204f22f76a620325bba3d",
"pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
"created_at": 1726331783,
"kind": 1,
"tags": [
[
"t",
"cybersecurity"
],
[
"t",
"security"
],
[
"t",
"infosec"
]
],
"content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\nhttps://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
"sig": "9e8d9fb6b0edae70f2fcb7a037a33a226e35837d967637c9dc7149a688ce7b2c376efe5256e538ada643d3b9c9b0e7291f88103d77cc886a3578cbf13cff1397"
}