Event JSON
{
"id": "afe3a995e3c9fd3677136fe1352eb1d402c792489fd668c2a7086c151efa5501",
"pubkey": "db7c27447b66df27b02d6f1fa8b910ed98bd9d276f678d1995c7e82467cc968f",
"created_at": 1718152209,
"kind": 1,
"tags": [
[
"e",
"44d277f422a1e66b78e3fe6d4f3562cf671a12e57009e015453f0a0dafb3a620",
"",
"reply"
],
[
"p",
"db7c27447b66df27b02d6f1fa8b910ed98bd9d276f678d1995c7e82467cc968f"
],
[
"proxy",
"https://thingy.social/@malcircuit/112600823171059360",
"web"
],
[
"e",
"40621b6e9bacd8d828708036896d182c89ae4d7d41b6a903a8a7430d9a32f3a9",
"",
"root"
],
[
"proxy",
"https://thingy.social/users/malcircuit/statuses/112600823171059360",
"activitypub"
],
[
"L",
"pink.momostr"
],
[
"l",
"pink.momostr.activitypub:https://thingy.social/users/malcircuit/statuses/112600823171059360",
"pink.momostr"
]
],
"content": "This is likely a fundamental flaw with generative AI that can never be fixed. It will always be wrong. It has no way to determine whether a string of text is \"correct\" because the system has no epistemological framework to evaluate such things.\n\nA statistical model is a statistical model is a statistical model. There's no understanding. No logic. No intelligence.",
"sig": "1c2c53b1c9ecd0df90c8954b1dbeda801f44124a6e7fb8bc241f406408ed3b6fcd5c302e047b80237a31f1dfb951938cc5ce4b96d3d08e0acfb278ce5e8a01ed"
}
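
The `id` field above is not arbitrary: NIP-01 defines it as the SHA-256 hash of the canonical serialization `[0, pubkey, created_at, kind, tags, content]`, JSON-encoded in UTF-8 with no extra whitespace. The Python sketch below recomputes that hash for an event loaded from a file; the filename `event.json` and the function name are placeholders rather than anything specified by the event itself, and verifying `sig` would additionally require a secp256k1 Schnorr library, which is not shown here.

```python
import hashlib
import json

def compute_event_id(event: dict) -> str:
    # NIP-01 canonical serialization: [0, pubkey, created_at, kind, tags, content]
    # encoded as compact JSON (no extra whitespace), then hashed with SHA-256.
    serialized = json.dumps(
        [
            0,
            event["pubkey"],
            event["created_at"],
            event["kind"],
            event["tags"],
            event["content"],
        ],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

if __name__ == "__main__":
    # Load the event shown above from a local file (hypothetical path)
    # and compare the recomputed hash against its "id" field.
    with open("event.json", "r", encoding="utf-8") as f:
        event = json.load(f)
    print(compute_event_id(event) == event["id"])
```

If the recomputed hash does not match the `id` field, the event has been altered (or re-serialized in a non-canonical way) since it was signed.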