Event JSON
{
  "id": "0eab442efc3e46be8b9544eea51d75e06d677b63ca418d518464fcf6bef85425",
  "pubkey": "caf2d45542168135df68add110633f21ac4536639bbdec5f2b34415edabcdca4",
  "created_at": 1738933996,
  "kind": 1,
  "tags": [
    ["t", "hacking"],
    ["t", "cybersecurity"],
    ["t", "cybercrime"],
    ["t", "ai"],
    [
      "imeta",
      "url https://media.infosec.exchange/infosec.exchange/media_attachments/files/113/962/778/334/352/236/original/e9c42f628bf97359.png",
      "m image/png",
      "dim 1200x660",
      "blurhash UD410usLWtSRo7jpa}a{SHb2jpjsa^b0a^fS"
    ],
    [
      "proxy",
      "https://infosec.exchange/users/cybernews/statuses/113962778386125644",
      "activitypub"
    ]
  ],
  "content": "Hackers have found ways to hide malicious code in AI models hosted on the Hugging Face platform, ReversingLabs researchers warn.\n\n#Hacking #cybersecurity #cybercrime #AI \n\nhttps://cnews.link/malicious-ai-models-infiltrating-hugging-face-1/\n\nhttps://media.infosec.exchange/infosec.exchange/media_attachments/files/113/962/778/334/352/236/original/e9c42f628bf97359.png",
  "sig": "d728c9faca9c5ac34747323500fcd2e682d57138050ab54fd172a6bccb129cf2cb90b8a5ee214e96cd2b6aff6c4a39f6aae4c43eca8926216d38b4aaa72a7a9c"
}
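
This is a standard Nostr event (NIP-01, kind 1 text note). The "t" tags carry the hashtags, the "imeta" tag (NIP-92) describes the attached image, and the "proxy" tag (NIP-48) points back to the original ActivityPub post the event was bridged from. The "id" field is not arbitrary: per NIP-01 it is the SHA-256 hash of a canonical serialization of the event, and "sig" is a BIP-340 Schnorr signature over that hash by "pubkey". Below is a minimal sketch of the id computation in Python, using only the standard library; the function name is illustrative rather than from any client library, and json.dumps with these options matches NIP-01's escaping rules for typical content like this event's.

import hashlib
import json

def nostr_event_id(event: dict) -> str:
    # NIP-01: the id is the sha256 of the UTF-8 JSON serialization of
    # the array [0, pubkey, created_at, kind, tags, content].
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),   # no whitespace, per NIP-01
        ensure_ascii=False,      # non-ASCII stays raw UTF-8, per NIP-01
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

Parsing the JSON above into a dict and passing it to nostr_event_id should reproduce the "id" value exactly. Checking "sig" additionally requires a secp256k1 library with BIP-340 Schnorr support, verifying the 64-byte signature against the id as the message and "pubkey" as an x-only public key.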