Event JSON
{
  "id": "94df0d8bdb38f9d3e32783580cca8ab46a4a1714fd0e500cc2bf571b5d8e1257",
  "pubkey": "d4ae453a7544ccf834ec4af61adfb550a13edd1384358a374a795f3deec449d9",
  "created_at": 1749733685,
  "kind": 1,
  "tags": [
    ["t", "glasgow"],
    ["t", "LLMs"],
    ["t", "hallucinating"],
    ["t", "bullshitting"],
    ["t", "LLM"],
    ["t", "chatgpt"],
    ["t", "openai"],
    ["t", "claude"],
    ["t", "llama"],
    ["t", "ai"],
    ["proxy", "https://dice.camp/users/strangequark/statuses/114670546781128960", "activitypub"],
    ["client", "Mostr", "31990:6be38f8c63df7dbf84db7ec4a6e6fbbd8d19dca3b980efad18585c46f04b26f9:mostr", "wss://relay.mostr.pub"]
  ],
  "content": "According to researchers at University of #Glasgow, #LLMs are not #hallucinating. The technical term is #bullshitting.\n\n\"Calling their mistakes ‘hallucinations’ lends itself to the confusion that the machines are in some way misperceiving but are nonetheless trying to convey something that they believe or have perceived. This is the wrong metaphor.... they are not trying to convey information at all. They are bullshitting.\"\n\nhttps://link.springer.com/article/10.1007/s10676-024-09775-5\n\n#LLM #ChatGPT #OpenAI #Claude #LLama #AI",
  "sig": "2eec9fecb8d0b245fd75e131880f555775115ceb591b172e0c89f4caa00682ca64b97c400ea51f1ca54d5059aa8c0e6908173787c169d5cade34209005609b38"
}
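
The "id" field above is not arbitrary: per NIP-01 it must be the SHA-256 hash of the canonical JSON serialization of [0, pubkey, created_at, kind, tags, content]. Below is a minimal sketch, using only the Python standard library, of how one could recompute the id for this event and compare it against the "id" field; it assumes json.dumps with compact separators and ensure_ascii=False matches NIP-01's escaping rules for this event's content, which holds for the common escapes (\n, \", \\) used here. Verifying "sig" additionally requires a BIP-340 Schnorr check over secp256k1, which needs an external library and is not shown.

# Recompute the event id per NIP-01:
# sha256 over the UTF-8 JSON serialization of
# [0, pubkey, created_at, kind, tags, content].
import hashlib
import json

def compute_event_id(event: dict) -> str:
    payload = [
        0,
        event["pubkey"],
        event["created_at"],
        event["kind"],
        event["tags"],
        event["content"],
    ]
    # Compact separators and raw UTF-8 approximate NIP-01's
    # canonical serialization (assumption noted in the lead-in).
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage: parse the event JSON above into `event`, then:
# assert compute_event_id(event) == event["id"]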