Event JSON
{
  "id": "8c31528b093b02c158ff7b518f7b998daad1ae9868ae172c8c1e990f7cd29ab1",
  "pubkey": "caf44b4f261c2f6ed1a98794653d14032978faa5a803518c001c2b9c6ec63483",
  "created_at": 1731006422,
  "kind": 1,
  "tags": [
    [
      "proxy",
      "https://kolektiva.social/@LevZadov/113443236932559744",
      "web"
    ],
    [
      "t",
      "ai"
    ],
    [
      "proxy",
      "https://kolektiva.social/users/LevZadov/statuses/113443236932559744",
      "activitypub"
    ],
    [
      "L",
      "pink.momostr"
    ],
    [
      "l",
      "pink.momostr.activitypub:https://kolektiva.social/users/LevZadov/statuses/113443236932559744",
      "pink.momostr"
    ],
    [
      "-"
    ]
  ],
  "content": "#AI\n\n\"Anyone familiar with HR practices probably knows of the decades of studies showing that résumé with Black- and/or female-presenting names at the top get fewer callbacks and interviews than those with white- and/or male-presenting names—even if the rest of the résumé is identical. A new study shows those same kinds of biases also show up when large language models are used to evaluate résumés instead of humans.\"\n\nhttps://arstechnica.com/ai/2024/11/study-ais-prefer-white-male-names-on-resumes-just-like-humans/",
  "sig": "0e61ba4df11f6a40a3cb0c44647c9d8432dbbbdf03c228eb6876dc4deee3211f0ac35bb8902bd3fbb025d362f09e79b5242d8e57667f6753019b8b9bed137d2d"
}
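
For reference, a minimal sketch of how the "id" field above is derived from the other fields, following the NIP-01 serialization rules (the function name event_id is illustrative, not part of any library):

import hashlib
import json

def event_id(pubkey: str, created_at: int, kind: int, tags: list, content: str) -> str:
    # NIP-01: the id is the SHA-256 (hex) of the UTF-8 JSON serialization of
    # [0, pubkey, created_at, kind, tags, content], with no extra whitespace
    # and non-ASCII characters kept unescaped.
    serialized = json.dumps(
        [0, pubkey, created_at, kind, tags, content],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

Calling event_id with the values shown above should reproduce the "id" field; the "sig" is a BIP-340 Schnorr signature over that id made with the key behind "pubkey".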