{"payload":{"feedbackUrl":"https://github.com/orgs/community/discussions/53140","repo":{"id":765126815,"defaultBranch":"main","name":"llama-cpp-rs","ownerLogin":"danbev","currentUserCanPush":false,"isFork":true,"isEmpty":false,"createdAt":"2024-02-29T10:33:16.000Z","ownerAvatar":"https://avatars.githubusercontent.com/u/432351?v=4","public":true,"private":false,"isOrgOwned":false},"refInfo":{"name":"","listCacheKey":"v0:1709203051.0","currentOid":""},"activityList":{"items":[{"before":null,"after":"dd264e79bc57312dd9eb2c2c3580e244b1427a2a","ref":"refs/heads/with_main_gpu","pushedAt":"2024-02-29T10:37:31.000Z","pushType":"branch_creation","commitsCount":0,"pusher":{"login":"danbev","name":"Daniel Bevenius","path":"/danbev","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/432351?s=80&v=4"},"commit":{"message":"add with_main_gpu to LlamaModelParams\n\nThis commit adds a `with_main_gpu` method to `LlamaModelParams` which\nallows the main GPU to be set.\n\nSigned-off-by: Daniel Bevenius ","shortMessageHtmlLink":"add with_main_gpu to LlamaModelParams"}}],"hasNextPage":false,"hasPreviousPage":false,"activityType":"all","actor":null,"timePeriod":"all","sort":"DESC","perPage":30,"cursor":"djE6ks8AAAAECJdldwA","startCursor":null,"endCursor":null}},"title":"Activity · danbev/llama-cpp-rs"}