Event JSON
{
  "id": "73ed0d0f385ab206ed836392be190f0b80c84afa78bca24f66605e7c6aed2008",
  "pubkey": "8139cfb427b06d70923c467ab1eb5c52f375c493f9efa24d8d63c5e23b64f628",
  "created_at": 1708817492,
  "kind": 1,
  "tags": [
    [
      "p",
      "0177da18d799f7a2896d947e3ca433e8e02322f1458ee25ac0a450987cefdfe1",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "8244f39d35ec71b3e2b69a98d07a8cf75d3cbea5943119af0f46801ea7da9c22",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "5cbc3cefd1a707c5079197d6833b6c3a6678a9b63c513f61c22770e9e9639bd7",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mastodon.social/users/tess/statuses/111989063158257674",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1q9ma5xxhn8m69ztdj3lrefpnarszxgh3gk8wykkq53gfsl80mlssulzgdu it's interesting because LLMs self-poison if they ingest too much of their output; also more people are using image poisoning tools.\n\nSo, there may come a point where we can no longer safely train models on new data.",
  "sig": "be25df3aca6e08550065e5618ba16fe3ab9fd1b7a15454cc0f003b178982c66acdabea0f459ed30e13210c37999614cd2a60d2a7a1dceb328b052dc5c16bd19e"
}
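
For reference, the "id" field above is defined by NIP-01 as the SHA-256 hash of the canonical serialization [0, pubkey, created_at, kind, tags, content]. A minimal sketch in Python of how it can be recomputed (the helper name nostr_event_id is illustrative, not part of any library):

import hashlib
import json

def nostr_event_id(event: dict) -> str:
    # NIP-01 canonical form: a JSON array with no extra whitespace,
    # UTF-8 encoded, then hashed with SHA-256.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage: recompute the hash and compare it with the "id" field of the
# event JSON above; the "sig" field is a BIP-340 Schnorr signature
# over that id, made with the key corresponding to "pubkey".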