Event JSON
{
"id": "e40ebab157d8f500766d77b2607e41b1d3268b188702309a19b75fe074c33aae",
"pubkey": "c9badfeaadbdeccc6e226b3b6d3ff17b132cf3b29095cf117620b2ee610f861a",
"created_at": 1746476189,
"kind": 30023,
"tags": [
[
"d",
"how-to-run-llms-locally-on-android"
],
[
"title",
"How to Run LLMs Locally on Android"
],
[
"summary",
"Use the free app PocketPal to run LLMs offline"
],
[
"published_at",
"1746476189"
],
[
"t",
"article"
],
[
"t",
"android"
],
[
"t",
"ai"
],
[
"t",
"llm"
],
[
"t",
"privacy"
]
],
"content": "1. Install [PocketPal](https://github.com/a-ghorbani/pocketpal-ai) (it's free and open source)\n2. Launch the app, open the menu, and navigate to **Models**\n3. Download one or more models (e.g. *Phi*, *Llama*, *Qwen*)\n4. Once downloaded, tap **Load** to start chatting\n\nℹ️ Experiment with different models and their quantizations (Q4, Q6, Q8, etc.) to find the most suitable one",
"sig": "a91ab82611a3194eaa09058c952ec951d949d755940ce1324565e9e0897b083e2a8c18a84a843de3804461434147b87d6c617fa1576b7fcf452d3f1187803297"
}
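
Verification sketch (Python)

Below is a minimal sketch of how one might work with the event above: it recomputes the event id per the NIP-01 scheme (SHA-256 of the compact JSON array [0, pubkey, created_at, kind, tags, content]) and reads the kind 30023 metadata tags (d, title, summary, t). The file name event.json is a placeholder assumption, and exact JSON escaping nuances can differ between implementations, so treat this as illustrative rather than a reference verifier.

# Sketch: recompute a Nostr event id (NIP-01) and read NIP-23 metadata tags.
# Assumption: the event above has been saved locally as event.json (placeholder name).
import hashlib
import json

with open("event.json", encoding="utf-8") as f:
    event = json.load(f)

# NIP-01: id = sha256 over the compact, UTF-8 serialization of
# [0, pubkey, created_at, kind, tags, content] with no extra whitespace.
serialized = json.dumps(
    [0, event["pubkey"], event["created_at"], event["kind"], event["tags"], event["content"]],
    separators=(",", ":"),
    ensure_ascii=False,
)
computed_id = hashlib.sha256(serialized.encode("utf-8")).hexdigest()
print("id matches:", computed_id == event["id"])

# Kind 30023 (long-form content): single-value tags such as d/title/summary/published_at,
# plus repeatable "t" topic tags.
single = {t[0]: t[1] for t in event["tags"] if len(t) >= 2 and t[0] != "t"}
topics = [t[1] for t in event["tags"] if len(t) >= 2 and t[0] == "t"]
print("d:", single.get("d"))
print("title:", single.get("title"))
print("summary:", single.get("summary"))
print("topics:", topics)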