Event JSON
{
  "id": "182dcd270db62b10cb60c014ffd455ae93193c3c5206ce03805d1466cc04a5bc",
  "pubkey": "95ea081a627cee44e532825986ecc662139d068c4bdacbe820a8f445b9c6c06b",
  "created_at": 1719322270,
  "kind": 1,
  "tags": [
    ["e", "b03e3cda634a9754d134afe09a048ceefb4264e09db66375b3b95d8ac4750fc6", "", "root"],
    ["p", "95ea081a627cee44e532825986ecc662139d068c4bdacbe820a8f445b9c6c06b"],
    ["e", "d398d85315b0062c2471cb707d146b5d020d832bf315d69d89d94557918963d7", "", "reply"],
    ["t", "reinforcementlearning"],
    ["proxy", "https://recsys.social/@brohrer/112677504315844372", "web"],
    ["proxy", "https://recsys.social/users/brohrer/statuses/112677504315844372", "activitypub"],
    ["L", "pink.momostr"],
    ["l", "pink.momostr.activitypub:https://recsys.social/users/brohrer/statuses/112677504315844372", "pink.momostr"],
    ["expiration", "1721914271"]
  ],
  "content": "In practice this will introduce a small delay of several hardware steps, resulting in some noise in the agent. I don’t know of any way to avoid this. This is an important feature of real time #ReinforcementLearning that gets lost in simulations. Any real time controller or physical hardware has to deal with this delay.",
  "sig": "61969625bf9204ba70ba38afe880902cbd8aebecd36947b9ac70f6425ed9e1665e0d4268e99cc9b611f12b4521cdba8ab807dfd27eb72f3575707a228fc5b13b"
}
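The "id" field is the SHA-256 hash of the event's canonical NIP-01 serialization, and "sig" is a BIP-340 Schnorr signature over that id by "pubkey". Below is a minimal Python sketch of the id check, assuming the JSON above has been parsed into a dict named event; json.dumps with compact separators and ensure_ascii=False approximates NIP-01's escaping rules for typical content like this note.

import hashlib
import json

def compute_event_id(event):
    # NIP-01 canonical form: [0, pubkey, created_at, kind, tags, content],
    # serialized as UTF-8 JSON with no extra whitespace.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# compute_event_id(event) == event["id"] should hold for this event.

Checking "sig" additionally requires a BIP-340 Schnorr verifier over the id and pubkey (for example via secp256k1 bindings), which is outside the scope of this sketch.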