{"payload":{"header_redesign_enabled":false,"results":[{"id":"752025291","archived":false,"color":"#DA5B0B","followers":211,"has_funding_file":false,"hl_name":"centerforaisafety/HarmBench","hl_trunc_description":"HarmBench: A Standardized Evaluation Framework for Automated Red Teaming and Robust Refusal","language":"Jupyter Notebook","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":752025291,"name":"HarmBench","owner_id":104453589,"owner_login":"centerforaisafety","updated_at":"2024-07-11T18:47:07.588Z","has_issues":true}},"sponsorable":false,"topics":[],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":84,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Acenterforaisafety%252FHarmBench%2B%2Blanguage%253A%2522Jupyter%2BNotebook%2522","metadata":null,"csrf_tokens":{"/centerforaisafety/HarmBench/star":{"post":"odJ-4svr43YSQnYQkhlhkksA7to1Qt2VcI8tYmEYs72LbH9-Q_YthrJ7Wv4rWdfStjI1Ey-136HSGW0XsqEsew"},"/centerforaisafety/HarmBench/unstar":{"post":"_axoPKMycrtqKKPgEOgvxDi7mzHPoZmRRnXrcPTzaZ3O8R4135G1LcI6-wh4XuYEgrk3cvZAVXowE7X_xPIoiQ"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"oYw5IETW8GZNPrjoz4Ak0fHARSZd-PdvqLIVF5fB3L2TyELVuF_BDqd0YfJF_4I7MBgI0WnZc0Cqig512USutQ"}}},"title":"Repository search results"}