Event JSON
{
  "id": "ad17936785592e0feb2d616e213dd9e0c2f5f33859241056c9d385ac98268a1f",
  "pubkey": "d1b98aac14553fb95a0b458f51e6f88528dcc65c6f46d5a29844136aef9ae932",
  "created_at": 1693137947,
  "kind": 1,
  "tags": [
    [
      "p",
      "efa2e20e303a53d43a13fa5b4a9daf9c3a0c39b39ea947f6d9bf4c8fba36160e",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "2573afb26e57be7afa75dcb4a43f92b0d445733102ba50055829de4d9cae2d1a",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "6dc1930a0ac1b6d25e7d78f6767c9cdc76f34488bd655462618430c017034031",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://noagendasocial.com/users/freegnu/statuses/110961488554969782",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1a73wyr3s8ffagwsnlfd548d0nsaqcwdnn6550akehaxglw3kzc8qce7y6q Turns out the TPU is faster and more cost effective because of the speed than the V100.\nhttps://medium.com/bigdatarepublic/cost-comparison-of-deep-learning-hardware-google-tpuv2-vs-nvidia-tesla-v100-3c63fe56c20f\n\nhttps://static.noagendasocial.com/media_attachments/files/110/961/487/958/316/419/original/965d15dc89ed6f36.png",
  "sig": "dad57543eb4eb657bdc7a25973001cd55a208c1af7aff8301eb8ad0164e6c624aa023f0c3450fe3e1e48d74fbb98eb5f25db1c450195bbe1ebfd266e24c85be7"
}
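
Per NIP-01, the id above is the SHA-256 hash of the canonical serialization [0, pubkey, created_at, kind, tags, content], and sig is a BIP-340 Schnorr signature over that hash made with the key in pubkey. Below is a minimal sketch of recomputing the id in Python, assuming the compact serialization produced by the standard json module matches NIP-01's escaping rules for this event:

import hashlib
import json

def nostr_event_id(event: dict) -> str:
    # NIP-01: id = SHA-256 of the UTF-8, whitespace-free JSON array
    # [0, pubkey, created_at, kind, tags, content].
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage (event.json is assumed to hold the JSON shown above):
# with open("event.json") as f:
#     event = json.load(f)
# print(nostr_event_id(event) == event["id"])

Checking sig additionally requires BIP-340 Schnorr verification of the 32-byte id against the x-only pubkey, which needs a secp256k1 library and is not shown here.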