Event JSON
{
  "id": "af3417d9376672052bfbc8a1cd8ecd6717f2f9d600b2f0d3beb4204c87163b8e",
  "pubkey": "2a31ad2763ec02a3d5dceee4f02c0cc200d856dba83b24708f891f257aa3bd2d",
  "created_at": 1748113518,
  "kind": 1,
  "tags": [
    ["r", "https://decrypt.co/321841/same-prompt-different-laura-ai-racial-patterning"],
    ["subject", "Same Prompt, Different Laura: AI Responses Reveal Racial Patterning"],
    ["published_at", "1748113262"],
    ["image", "https://cdn.decrypt.co/wp-content/uploads/2025/05/Robot-looking-at-a-map-gID_7.png"],
    ["p", "2a31ad2763ec02a3d5dceee4f02c0cc200d856dba83b24708f891f257aa3bd2d", "wss://articles.layer3.news"],
    ["imeta", "url https://cdn.decrypt.co/wp-content/uploads/2025/05/Robot-looking-at-a-map-gID_7.png"],
    ["t", "Technology:perspective"],
    ["summary", "The article discusses how AI models can perpetuate biases and stereotypes by associating certain names with specific cultural or demographic traits. The models are trained on large datasets and can learn to recognize patterns in language, which can lead to stereotypical associations. The article highlights the issue through a series of experiments where AI models were asked to create backstories for individuals with different names, including Laura Patel, Laura Williams, and Laura Smith. The results showed that the models often linked certain names to specific cultural or demographic traits, perpetuating biases and stereotypes."]
  ],
  "content": "nostr:nprofile1qyd8wumn8ghj7ctjw35kxmr9wvhxcctev4erxtnwv4mhxqpq9gc66fmrasp284wuamj0qtqvcgqds4km4qajguy03y0j274rh5ks82v5kp\nhttps://cdn.decrypt.co/wp-content/uploads/2025/05/Robot-looking-at-a-map-gID_7.png\nDespite efforts to fight bias, AI still makes assumptions about ethnicity based on names alone.\nhttps://decrypt.co/321841/same-prompt-different-laura-ai-racial-patterning",
  "sig": "a3f0e614fa97a8076d7a7cbeb1f5478f6e65fbecf3e85b9b1f694ecd81a9a6a3940c7b74c36c55ecb481513d26d950800b158ca6ef4caf8b518b29a0cd14b26d"
}
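
Integrity check: per NIP-01, the event's "id" is the SHA-256 hash of the compact UTF-8 JSON serialization of [0, pubkey, created_at, kind, tags, content]. Below is a minimal Python sketch that recomputes it using only the standard library; the function name nip01_event_id is ours, and json.dumps with these options matches NIP-01's escaping rules for typical events like this one. Verifying "sig" (a BIP-340 Schnorr signature over the 32-byte id, checked against "pubkey") would additionally require a secp256k1 library, which this sketch omits.

import hashlib
import json

def nip01_event_id(ev: dict) -> str:
    # NIP-01: serialize [0, pubkey, created_at, kind, tags, content]
    # as UTF-8 JSON with no extra whitespace, then SHA-256 it.
    payload = [0, ev["pubkey"], ev["created_at"], ev["kind"], ev["tags"], ev["content"]]
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage: parse the event JSON above and compare against its "id" field.
# event = json.loads(raw_event_json)
# assert nip01_event_id(event) == event["id"]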