Wired Security RSS on Nostr: A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions
Published at 2024-09-14 14:45:10
Event JSON
{
  "id": "0dcd72d7161c3ebf5e47d2a7b420280ff57e33a476804e1355d61e24f3ece060",
  "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
  "created_at": 1726325110,
  "kind": 1,
  "tags": [
    [
      "t",
      "cybersecurity"
    ],
    [
      "t",
      "security"
    ],
    [
      "t",
      "infosec"
    ]
  ],
  "content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\nhttps://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
  "sig": "35ca7adda9aa491547043fdd61246b157e8efdcb34cf24827f35965f81889aac3fdb1ac2dce0737b216af2115400263438ebdf137603036715f9def5f007cfad"
}
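For reference, a minimal sketch (Python, standard library only, assuming the NIP-01 serialization rules) of how the "id" field above relates to the other fields: it is the SHA-256 hash of the canonical JSON array [0, pubkey, created_at, kind, tags, content] with no extra whitespace. The values below are copied from the event JSON; the "sig" field is a separate Schnorr signature over that id and is not checked here.

import hashlib
import json

# Fields copied from the event JSON above.
event = {
    "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
    "created_at": 1726325110,
    "kind": 1,
    "tags": [["t", "cybersecurity"], ["t", "security"], ["t", "infosec"]],
    "content": (
        "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\n"
        "https://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/"
    ),
}

# NIP-01 canonical form: a compact JSON array, UTF-8 encoded, with control
# characters escaped (json.dumps with these options matches that for this content).
serialized = json.dumps(
    [0, event["pubkey"], event["created_at"], event["kind"], event["tags"], event["content"]],
    separators=(",", ":"),
    ensure_ascii=False,
)

event_id = hashlib.sha256(serialized.encode("utf-8")).hexdigest()
print(event_id)  # should match the "id" shown above if the event is well-formed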