Wired Security RSS on Nostr: A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions
Published at 2024-09-14 13:14:11
Event JSON
{
  "id": "270ad8f9f5e493d90d68a18cc8ad13a27c8a8e112a6ab470d7502e80db5c0251",
  "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
  "created_at": 1726319651,
  "kind": 1,
  "tags": [
    ["t", "cybersecurity"],
    ["t", "security"],
    ["t", "infosec"]
  ],
  "content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\nhttps://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
  "sig": "fd7d97c1fb91fc9f0e1c77cb9aee17f1d7ea0fd8e75b096f16dc635f2198bbbb73d33e6509b16ecfef9a93931ac51e71c93854fc8e0a363cc303ab48266f8b0b"
}
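
For context on the fields above: this is an ordinary Nostr kind-1 text note. A minimal sketch in Python, assuming the NIP-01 serialization rules, of how the "id" field is derived from the other fields; the values are copied from the JSON above, and the expectation that the hash matches is an assumption that the event is well-formed.

import hashlib
import json

# Event fields, copied from the JSON above.
event = {
    "pubkey": "0a96c51c0ca412c116c04f9bbb3c1338b1b19385e123682def6e7e803fdbf160",
    "created_at": 1726319651,
    "kind": 1,
    "tags": [["t", "cybersecurity"], ["t", "security"], ["t", "infosec"]],
    "content": "A Creative Trick Makes ChatGPT Spit Out Bomb-Making Instructions\n\n"
               "https://www.wired.com/story/chatgpt-jailbreak-homemade-bomb-instructions/",
}

# Per NIP-01, the event id is the SHA-256 hash of the JSON array
# [0, pubkey, created_at, kind, tags, content], serialized without extra whitespace.
serialized = json.dumps(
    [0, event["pubkey"], event["created_at"], event["kind"],
     event["tags"], event["content"]],
    separators=(",", ":"),
    ensure_ascii=False,
)
event_id = hashlib.sha256(serialized.encode("utf-8")).hexdigest()
print(event_id)  # expected to equal the "id" field shown above

The "sig" field is a BIP-340 Schnorr signature over that id by the key behind "pubkey"; checking it requires a secp256k1 library and is not shown in this sketch.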