Event JSON
{
  "id": "cfe6baad31057dfe2c6bf3ee69087beb94f4de47a52370d52953463d1dbcbb1a",
  "pubkey": "05de5f2d3b8d98f9b929695080ae6aa51b14f00dc122eef1d40d502a0ab48154",
  "created_at": 1731724130,
  "kind": 1,
  "tags": [
    [
      "p",
      "3c38608fad06858fd0749a6e3b09befee3f5c5397cda8a371be55107fbde2bae",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "b808d50107cd2557ceab1235027bdbc35049c64cef8c2f3ba1d9ef3d45c603c2",
      "wss://relay.mostr.pub"
    ],
    [
      "zap",
      "05de5f2d3b8d98f9b929695080ae6aa51b14f00dc122eef1d40d502a0ab48154",
      "wss://relay.mostr.pub",
      "0.915"
    ],
    [
      "zap",
      "6be38f8c63df7dbf84db7ec4a6e6fbbd8d19dca3b980efad18585c46f04b26f9",
      "wss://relay.mostr.pub",
      "0.085"
    ],
    [
      "e",
      "c6dec0a2b5728c1902b0a016c9ceaffa34483532ed7dec51a83e7fd108965356",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://shitposter.world/objects/37a14523-e9e8-441c-a0ef-cda783075820",
      "activitypub"
    ]
  ],
  "content": "nostr:nprofile1qy2hwumn8ghj7un9d3shjtnddaehgu3wwp6kyqpq8suxpradq6zcl5r5nfhrkzd7lm3lt3fe0ndg5dcmu4gs07779whqwqzw4s I watched a really long video about AI risk that postulates about an attacker inserting malicious code into your codebase via LLM service. It's a perfectly valid thing to think about but literally every comment is a dumbass leveraging it to argue that LLMs are useless. I would feel unsatisfied if every comment on my videos heaped me with praise but misunderstood what I said",
  "sig": "97cef69982c44178ab1b303b4fefd6bdde1cbfaaf7aa0b3d1f052b7f1d38344b811681a266a08f41857845daa22684c99ccbb77c8f43f2e6cd8c4ab3cf50a55e"
}
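
A minimal sketch for checking the event above, assuming the standard NIP-01 id derivation: the "id" field should equal the sha256 hash of the compact JSON serialization of [0, pubkey, created_at, kind, tags, content]. Verifying "sig" would additionally require a BIP-340 Schnorr verifier, which is not shown here.

import hashlib
import json

def compute_event_id(event: dict) -> str:
    # NIP-01: serialize [0, pubkey, created_at, kind, tags, content]
    # as compact JSON (no extra whitespace, UTF-8) and hash it with sha256.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# For the event above, compute_event_id(event) should equal event["id"].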