Event JSON
{
  "id": "c2002180b1333b23657874866db02ff790a8137525dc9347fc5288efbee3e3fc",
  "pubkey": "4a5549958eafeb0c61062f37a8a33988f42b85ec9b14c51bfcfce9b3bf13d7fc",
  "created_at": 1714656947,
  "kind": 1,
  "tags": [
    ["t", "ai"],
    ["t", "chatgpt"],
    ["t", "bard"],
    ["t", "chatsonic"],
    ["t", "gemini"],
    ["proxy", "https://fediscience.org/users/petersuber/statuses/112371757742391598", "activitypub"]
  ],
  "content": "How bad are #AI tools at citing real research?\nhttps://www.thieme-connect.de/products/ejournals/abstract/10.1055/s-0044-1786033\n\nIn April 2023 researchers created a competition in which they asked #ChatGPT, Google #Bard [now #Gemini], and #Chatsonic to cite \"specific obstetrical randomized controlled trials (RCTs) published in 2020.\"\n\nBard ran away with it, but only cited 13.6% of the RCTs. ChatGPT scored 2.4%, and Chatsonic 0%.",
  "sig": "164b75d3d2369bfdd214302cf74bd0a6e1a0fbed10d8656a099b1505e1d0ef9389f2f067845cc2a879df1c876a98d9f5c5deafa5bcc9b280f556e9f3eda063b1"
}
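
As a rough illustration of how the fields above fit together, the sketch below (Python, standard library only) recomputes the event id from pubkey, created_at, kind, tags, and content, and pulls the hashtags out of the "t" tags. The event dict and the event_id helper are assumptions made for this example, not part of any particular library, and json.dumps with compact separators is only an approximation of NIP-01's exact serialization rules, which is sufficient for this event's plain-ASCII content.

import hashlib
import json

# The event shown above, restated as a Python dict.
event = {
    "pubkey": "4a5549958eafeb0c61062f37a8a33988f42b85ec9b14c51bfcfce9b3bf13d7fc",
    "created_at": 1714656947,
    "kind": 1,
    "tags": [
        ["t", "ai"],
        ["t", "chatgpt"],
        ["t", "bard"],
        ["t", "chatsonic"],
        ["t", "gemini"],
        ["proxy",
         "https://fediscience.org/users/petersuber/statuses/112371757742391598",
         "activitypub"],
    ],
    "content": (
        "How bad are #AI tools at citing real research?\n"
        "https://www.thieme-connect.de/products/ejournals/abstract/10.1055/s-0044-1786033\n"
        "\n"
        "In April 2023 researchers created a competition in which they asked #ChatGPT, "
        "Google #Bard [now #Gemini], and #Chatsonic to cite \"specific obstetrical "
        "randomized controlled trials (RCTs) published in 2020.\"\n"
        "\n"
        "Bard ran away with it, but only cited 13.6% of the RCTs. ChatGPT scored 2.4%, "
        "and Chatsonic 0%."
    ),
}

def event_id(ev: dict) -> str:
    # NIP-01: the id is the sha256 of the UTF-8 serialization of
    # [0, pubkey, created_at, kind, tags, content] with no extra whitespace.
    payload = [0, ev["pubkey"], ev["created_at"], ev["kind"], ev["tags"], ev["content"]]
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Hashtags are carried in "t" tags; the "proxy" tag points at the original ActivityPub post.
hashtags = [tag[1] for tag in event["tags"] if tag[0] == "t"]
print(hashtags)  # ['ai', 'chatgpt', 'bard', 'chatsonic', 'gemini']

# Should match the id shown above if the content string is copied verbatim.
print(event_id(event) == "c2002180b1333b23657874866db02ff790a8137525dc9347fc5288efbee3e3fc")

Checking the sig field would additionally require a BIP-340 Schnorr verification of the id against the pubkey, which needs a secp256k1 library and is omitted here.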