Event JSON
{
  "id": "88af45792cee910f8f9a8f068410b841d10ebd981b78ac25370245b8a3929434",
  "pubkey": "44d3e9a67573e59b0d812d21bf8e6f56590a5731422220b7240d1211fc5e7c24",
  "created_at": 1707930352,
  "kind": 1,
  "tags": [
    [
      "p",
      "07515e85c147755e4540b06e55632b1260ec3908fe24852cd7ec43c16d06771d",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "19f681e034e87cdbcf4346fe68ca0e61653e30b8331a8ec4e639746947a39ed5",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "cbf366f4b1cdd13902270b9d584a188eb08fc0b58d9928bdbae209b5b06f044b",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mastodon.social/users/jplebreton/statuses/111930923552169619",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1qag4apwpga64u32qkph92cetzfswcwgglcjg2txha3puzmgxwuws5p9gyn Ah, the old autonomous car argument, that falls apart with the revelation that computers make entirely different classes of errors than humans.\nI'm genuinely curious though, what are some examples of LLMs that are using knowledge graphs to reduce / mitigate hallucinations.",
  "sig": "a2c36ee458dee07566c3a562149cb2a89ced5b0fd9be79f269bfb2a2c3535a8a1fa76e1a5c72b9589420f3f355f063275d62b0e7362516421376c3e26f490f25"
}
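
For context: this is a kind-1 Nostr text note bridged from ActivityPub (the "proxy" tag points at the original Mastodon status), with "p" tags for the mentioned pubkeys and an "e" tag with the "reply" marker identifying the note it replies to. Under NIP-01, the "id" field is the SHA-256 hash of a canonical serialization of the event, and "sig" is a BIP-340 Schnorr signature over that id, verifiable against "pubkey". Below is a minimal Python sketch of the id computation; it assumes the JSON above has been loaded into a dict named event, and relies on Python's default JSON string escaping, which matches the NIP-01 escape rules for ordinary content like this (exotic control characters would need the exact NIP-01 rules):

import hashlib
import json

def nip01_event_id(event: dict) -> str:
    # NIP-01 serializes the event as the JSON array
    # [0, pubkey, created_at, kind, tags, content]
    # with no extra whitespace, then hashes it with SHA-256.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),  # compact form, no whitespace
        ensure_ascii=False,     # keep UTF-8 characters unescaped
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

Calling nip01_event_id(event) on the event above should reproduce the "id" shown (88af4579...); a mismatch would mean the event was altered or mis-serialized. Checking "sig" additionally requires a secp256k1 Schnorr (BIP-340) verification, which is not in the standard library.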