{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"arena-hard-auto","owner":"lm-sys","isFork":false,"description":"Arena-Hard-Auto: An automatic LLM benchmark. ","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":3,"starsCount":294,"forksCount":24,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,28,12,16,4,4,0,0,2,6,4,0,3,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T22:20:05.730Z"}},{"type":"Public","name":"RouteLLM","owner":"lm-sys","isFork":false,"description":"A framework for serving and evaluating large language model routers.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":0,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,36,16,21],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T21:33:48.117Z"}},{"type":"Public","name":"FastChat","owner":"lm-sys","isFork":false,"description":"An open platform for training, serving, and evaluating large language models. Release repo for Vicuna and Chatbot Arena.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":81,"issueCount":723,"starsCount":35443,"forksCount":4348,"license":"Apache License 2.0","participation":[25,25,32,23,14,21,17,13,23,5,19,17,4,16,9,17,11,1,13,11,4,18,1,11,3,20,7,7,1,10,17,8,11,3,2,0,0,1,3,3,1,6,3,4,3,1,10,2,3,3,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T09:20:01.369Z"}},{"type":"Public","name":"lm-sys.github.io","owner":"lm-sys","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":3,"issueCount":1,"starsCount":45,"forksCount":17,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T03:23:37.794Z"}},{"type":"Public","name":"llm-decontaminator","owner":"lm-sys","isFork":false,"description":"Code for the paper \"Rethinking Benchmark and Contamination for Language Models with Rephrased Samples\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":177,"forksCount":12,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,0,3,8,11,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-20T22:33:26.019Z"}},{"type":"Public archive","name":"vicuna-blog-eval","owner":"lm-sys","isFork":false,"description":"The code and data for the GPT-4 based benchmark in the vicuna blog post","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":32,"forksCount":7,"license":"Apache License 2.0","participation":[2,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-02T02:32:40.000Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}