Event JSON
{
  "id": "6c03ccd2d94e5f068f52aac8d19b97ba4ef2f70caeba52123634c7832544a25b",
  "pubkey": "be8217b6563871a928b46ab9465c535bd711d7ea6755b0da1d1ff702ed869f3d",
  "created_at": 1711237089,
  "kind": 1,
  "tags": [
    [
      "p",
      "6a72e15af7e8ec95576876f5923accb3ad59138e4f1427109223af035a16ddf2",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "312ba530d9f5162682cbe01b52dadae3a843372c920ed2051a7236c262414e4e",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "252e54ca0564e859bef30a1a80a832db3e8f69e2fe541b8cd11ad9d774a2cea5",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mastodon.social/users/adr/statuses/112147633906597680",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1dfewzkhharkf24mgwm6eywkvkwk4jyuwfu2zwyyjywhsxkskmheqd8yx8m Not offhand, no. Anything RDNA 2 or above should be okay. Note you don't strictly *need* GPU acceleration to use Ollama - it uses quantized models that will run on straight CPU - that's how I use it at home. It will be faster with GPU of course.",
  "sig": "129af17d0ec20ddfb5601933ae3b3767bc480a115943bb50f775712df1650621ef7be6587aa519a3ae0417b695dc38152043cb1b109090ca4dc0cff6a6f28534"
}
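
For reference, the "id" field above is not arbitrary: per NIP-01 it is the SHA-256 hash of the compact JSON serialization of [0, pubkey, created_at, kind, tags, content]. Below is a minimal sketch of that recomputation using only the Python standard library; verifying "sig" would additionally require a BIP-340 Schnorr library over secp256k1, which is not shown here.

```python
import hashlib
import json

def compute_event_id(event: dict) -> str:
    # NIP-01 canonical form: a JSON array [0, pubkey, created_at,
    # kind, tags, content] serialized with no extra whitespace.
    payload = [
        0,
        event["pubkey"],
        event["created_at"],
        event["kind"],
        event["tags"],
        event["content"],
    ]
    # Compact separators and ensure_ascii=False keep the output
    # byte-for-byte compatible with the NIP-01 serialization rules.
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()
```

Running this on the event above should reproduce its "id" exactly; a mismatch would mean the event was altered after signing.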