Nostr Event JSON (NIP-01 note, kind 1 — signed; do not modify fields, as any change invalidates `id` and `sig`)
{
"id": "13e0814a4f5c4b94b5f74bb350b9388c37e8e09951026356d306035374a17072",
"pubkey": "5bc1c11a208f6c892aad628b67a6e54fd9e68f66cb59f93900ab5861f7c3ceb2",
"created_at": 1730351748,
"kind": 1,
"tags": [
[
"proxy",
"https://techhub.social/@Techmeme/113400332163031911",
"web"
],
[
"proxy",
"https://techhub.social/users/Techmeme/statuses/113400332163031911",
"activitypub"
],
[
"L",
"pink.momostr"
],
[
"l",
"pink.momostr.activitypub:https://techhub.social/users/Techmeme/statuses/113400332163031911",
"pink.momostr"
],
[
"-"
]
],
"content": "Zuckerberg says Meta is training Llama 4 models on a cluster of 100K+ H100 chips, \"or bigger than anything that I've seen reported for what others are doing\" (Wired)\n\nhttps://www.wired.com/story/meta-llama-ai-gpu-training/\nhttp://www.techmeme.com/241031/p3#a241031p3",
"sig": "3db18e981487b740023d4f95a326f8eca446f3455846a5b740c178acee34c6e6f3867faf8c8516a5994715ffdba90ef96723524cef4bf1003412924a5c3b223b"
}