Event JSON
{
  "id": "207524f921c7ae92700478206673cdbbbb6c69aa6bb0b045938a2033001725d7",
  "pubkey": "da18e9860040f3bf493876fc16b1a912ae5a6f6fa8d5159c3de2b8233a0d9851",
  "created_at": 1743912020,
  "kind": 1,
  "tags": [
    [
      "e",
      "44503bef21fd81048133e4fb2a656a1b72f567f321c203900bfdf1ed2babc534",
      "ws://192.168.18.7:7777",
      "root",
      "da18e9860040f3bf493876fc16b1a912ae5a6f6fa8d5159c3de2b8233a0d9851"
    ],
    [
      "e",
      "6d692dad423309fca3da95244d3cab91d6474b942ea4072c3d4ad0e4645ed0c8",
      "ws://192.168.18.7:7777",
      "reply"
    ],
    [
      "p",
      "9fec72d579baaa772af9e71e638b529215721ace6e0f8320725ecbf9f77f85b1",
      "",
      "mention"
    ],
    [
      "p",
      "da18e9860040f3bf493876fc16b1a912ae5a6f6fa8d5159c3de2b8233a0d9851",
      "",
      "mention"
    ]
  ],
  "content": "Didn’t know you were the one who wrote this! The spirit of what you are doing is great. Given LLMs are token predictors that are configured with system prompts, and are designed with tradeoffs in mind (better at coding or writing), what do you think about considering system and user prompts when measuring alignment? \n\nAlignment is becoming so overloaded, especially with doomer predictions like https://ai-2027.com/ ",
  "sig": "d7675091acffd2dc898c7567a7c39206690f99a27b1164f651c410a17527a26d6a6b1c2e2b29b7f01833be3321abfe47efa50db81f349e7325c20ce982331fc3"
}
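
The event above is a Nostr kind-1 text note whose "e" and "p" tags follow the NIP-10 threading conventions (event id, optional relay hint, and a "root", "reply", or "mention" marker). As a rough illustration, the Python sketch below reads such an event, pulls out the root and reply ids and the mentioned pubkeys, and recomputes the NIP-01 id (sha256 of the canonical serialization [0, pubkey, created_at, kind, tags, content]). The file name event.json and the variable names are assumptions for the example, not part of the event itself.

import hashlib
import json

# Load the event shown above (assumed to be saved as event.json).
with open("event.json") as f:
    event = json.load(f)

# NIP-10 marked "e" tags: ["e", <event id>, <relay hint>, <marker>, <author pubkey>?]
root_id = next((t[1] for t in event["tags"]
                if t[0] == "e" and len(t) > 3 and t[3] == "root"), None)
reply_id = next((t[1] for t in event["tags"]
                 if t[0] == "e" and len(t) > 3 and t[3] == "reply"), None)

# "p" tags list the pubkeys being notified or mentioned.
mentions = [t[1] for t in event["tags"] if t[0] == "p"]

# NIP-01 id: sha256 over the JSON array [0, pubkey, created_at, kind, tags, content]
# serialized with no extra whitespace and without escaping non-ASCII characters.
serialized = json.dumps(
    [0, event["pubkey"], event["created_at"], event["kind"],
     event["tags"], event["content"]],
    separators=(",", ":"),
    ensure_ascii=False,
)
computed_id = hashlib.sha256(serialized.encode("utf-8")).hexdigest()

print("root: ", root_id)
print("reply:", reply_id)
print("mentions:", mentions)
print("id matches:", computed_id == event["id"])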