Event JSON
{
  "id": "e4021085393a736c5ae7797804f943d077f9f903a067480c7477d056a0caf98d",
  "pubkey": "a3af17104f91f7f9b5667b14717d1d434931195e4c6e075b7dc13d8ed71bc46f",
  "created_at": 1728941004,
  "kind": 1,
  "tags": [
    [
      "imeta",
      "url https://files.mastodon.social/media_attachments/files/113/307/877/574/440/093/original/d0eb3c8aa3691d4d.jpg",
      "m image/jpeg"
    ],
    [
      "proxy",
      "https://mastodon.social/@arstechnica/113307877686294030",
      "web"
    ],
    [
      "proxy",
      "https://mastodon.social/users/arstechnica/statuses/113307877686294030",
      "activitypub"
    ],
    [
      "L",
      "pink.momostr"
    ],
    [
      "l",
      "pink.momostr.activitypub:https://mastodon.social/users/arstechnica/statuses/113307877686294030",
      "pink.momostr"
    ],
    [
      "-"
    ]
  ],
  "content": "LLMs can’t perform “genuine logical reasoning,” Apple researchers suggest\n\nIrrelevant red herrings lead to \"catastrophic\" failure of logical inference.\n\nhttps://arstechnica.com/ai/2024/10/llms-cant-perform-genuine-logical-reasoning-apple-researchers-suggest/?utm_brand=arstechnica\u0026utm_social-type=owned\u0026utm_source=mastodon\u0026utm_medium=social\nhttps://files.mastodon.social/media_attachments/files/113/307/877/574/440/093/original/d0eb3c8aa3691d4d.jpg\n",
  "sig": "0358e10d24ce78d0ac5cf5a89321459ae28c45334ed0aa42231db7c8a9704277c1e2501e536008e1da4c47d5b2ec93ae0b1320b14ac5e1248bf3d810a637a34a"
}
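
The "id" field above is not arbitrary: per NIP-01 it is the lowercase hex SHA-256 of a canonical serialization of the other event fields, and the "sig" is a Schnorr signature over that id by the "pubkey". Below is a minimal sketch of recomputing the id, assuming the JSON above has been saved to a file named event.json (the filename is an assumption for illustration; signature verification would additionally require a secp256k1 library and is omitted here).

import hashlib
import json

def compute_event_id(ev: dict) -> str:
    # Per NIP-01, the id is the sha256 of the UTF-8 encoded JSON array
    # [0, pubkey, created_at, kind, tags, content], serialized with no
    # extra whitespace and with non-ASCII characters left unescaped.
    payload = [0, ev["pubkey"], ev["created_at"], ev["kind"], ev["tags"], ev["content"]]
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

with open("event.json") as f:
    event = json.load(f)

print("id matches:", compute_event_id(event) == event["id"])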