Event JSON
{
  "id": "5d82fe813ec37a85fd4e5219ba2f3a7cf0f165696f831661dd92ce8898f4fb7d",
  "pubkey": "ddc6c81c03da216550654f73121985d8f30636aac98903de01993746bab7bdb3",
  "created_at": 1749283640,
  "kind": 1,
  "tags": [
    ["t", "ai"],
    ["t", "memorization"],
    ["t", "llm"],
    ["proxy", "https://mastodon.social/users/ErikJonker/statuses/114641052656960802", "activitypub"],
    ["client", "Mostr", "31990:6be38f8c63df7dbf84db7ec4a6e6fbbd8d19dca3b980efad18585c46f04b26f9:mostr", "wss://relay.mostr.pub"]
  ],
  "content": "Interesting, \"GPT-style models have a fixed memorization capacity of approximately 3.6 bits per parameter.\"\nhttps://venturebeat.com/ai/how-much-information-do-llms-really-memorize-now-we-know-thanks-to-meta-google-nvidia-and-cornell/\n#ai #memorization #llm",
  "sig": "b452a6288113efab4499c8946f4cf315d12d59e8467655fc0f990ec3e1eebd6b6298687f492841102ce2ab50ffb06b1e90dce9cb8f557b18ee25632953420ba2"
}
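
The figure quoted in the event's "content" field lends itself to a quick back-of-the-envelope check. Below is a minimal Python sketch, assuming the ~3.6 bits-per-parameter capacity from the linked article; the model sizes used are illustrative examples, not taken from the source.

```python
BITS_PER_PARAM = 3.6  # approximate memorization capacity per parameter, per the quoted article

def memorization_capacity_bytes(n_params: int) -> float:
    """Total memorized information, in bytes, for a model with n_params parameters."""
    return n_params * BITS_PER_PARAM / 8  # convert bits to bytes

# Illustrative GPT-style parameter counts (hypothetical examples, not from the source)
for n_params in (125_000_000, 1_300_000_000, 7_000_000_000):
    gb = memorization_capacity_bytes(n_params) / 1e9
    print(f"{n_params / 1e9:.3f}B params -> ~{gb:.2f} GB memorized")
```

For instance, at 3.6 bits per parameter, a 7B-parameter model would top out at roughly 3.15 GB of memorized information.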