Event JSON
{
  "id": "726807367568c3da4e0bbc0b1bbcad86b9992431b2b054d9b3332fdeb84a4cf2",
  "pubkey": "73cb080ca85d2e99290c032ebd2749471be27ee682f116a42d88a3f34901ac36",
  "created_at": 1739547779,
  "kind": 1,
  "tags": [
    ["t", "opensource"],
    ["t", "ai"],
    ["t", "models"],
    ["t", "perfect"],
    ["t", "storm"],
    ["t", "malicious"],
    ["t", "code"],
    ["t", "vulnerabilities"],
    ["t", "foss"],
    ["t", "well"],
    ["t", "aware"],
    ["t", "used"],
    ["t", "security"],
    ["t", "stuff"],
    ["t", "be"],
    ["t", "very"],
    ["t", "careful"],
    ["proxy", "https://mastodon.social/users/rod2ik/statuses/114003003246896760", "activitypub"]
  ],
  "content": "\" #OpenSource #AI #Models: #Perfect #Storm for #Malicious #Code, #Vulnerabilities \"\n\nBut hey, isn't #opensource #foss very #well #aware , and #used to all this #security #stuff ? \n\nOf course, they are.\nBut we should still #be #very #careful , of course\n\nhttps://www.darkreading.com/cyber-risk/open-source-ai-models-pose-risks-of-malicious-code-vulnerabilities",
  "sig": "b1b15d110bb0faab870d5c6a8210999f45334a9e74e2163c15ff308d72b67ee8b9079ff22dbcae9d44792f99a2ec1ef8398d3d42ae89dd1dd0af8fa920cf82c7"
}
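
This is a Nostr kind-1 text note as defined in NIP-01: the "t" tags mirror the post's hashtags, the "proxy" tag records the ActivityPub status it was bridged from, "id" is the SHA-256 hash of the event's canonical serialization, and "sig" is a BIP-340 Schnorr signature over that id made with the key behind "pubkey". The sketch below is a minimal illustration, not a reference implementation; it recomputes the id from the other fields, and the function name compute_event_id is a hypothetical helper rather than part of any library.

```python
import hashlib
import json

def compute_event_id(event: dict) -> str:
    # NIP-01 canonical form: [0, pubkey, created_at, kind, tags, content],
    # serialized as compact JSON (no extra whitespace), then SHA-256 hashed.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Example: the result should match the "id" field of the event above.
# event = json.load(open("event.json"))
# assert compute_event_id(event) == event["id"]
```

Verifying "sig" additionally requires a BIP-340 Schnorr check of the signature over id against pubkey using a secp256k1 library, which this sketch leaves out.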