{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"viper-verifiable-rl-impl","owner":"Safe-RL-Team","isFork":false,"description":"Implementation of the VIPER algorithm introduced in \"Verifiable Reinforcement Learning via Policy Extraction\" by Bastani et al. ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":12,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-04T21:52:42.213Z"}},{"type":"Public","name":"viper-verifiable-reinforcement-learning","owner":"Safe-RL-Team","isFork":false,"description":"The blog post accompanying the implementation of the paper \"Viper: Verifiable Reinforcement Learning via Policy Extraction\" by Bastani et al.","allTopics":[],"primaryLanguage":{"name":"EJS","color":"#a91e50"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-13T09:13:07.094Z"}},{"type":"Public","name":"lambda-bo-blog","owner":"Safe-RL-Team","isFork":false,"description":"Lambda Bayesian optimization blog","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-13T08:33:19.353Z"}},{"type":"Public","name":"topics-in-RL","owner":"Safe-RL-Team","isFork":false,"description":"A compilation of recent machine learning papers focused on safe reinforcement learning","allTopics":["reinforcement-learning","safety","trustworthy-ai"],"primaryLanguage":{"name":"EJS","color":"#a91e50"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":0,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-12T23:43:34.974Z"}},{"type":"Public","name":"post--example","owner":"Safe-RL-Team","isFork":true,"description":"Example Distill article repository—clone, rename, start writing!","allTopics":[],"primaryLanguage":{"name":"EJS","color":"#a91e50"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":78,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-09T19:01:55.673Z"}},{"type":"Public","name":"curriculum-learning","owner":"Safe-RL-Team","isFork":false,"description":"Blog Post about Curriculum Induction for Safe Reinforcement Learning","allTopics":["curriculum-learning","safe-rl","reinforcement-learning"],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":1,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-08T19:21:33.882Z"}},{"type":"Public","name":"PID","owner":"Safe-RL-Team","isFork":true,"description":"Blog post about Responsive Safety in Reinforcement Learning by PID Lagrangian Methods","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-02T16:22:33.334Z"}},{"type":"Public","name":"safe-action-repetition-article","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"EJS","color":"#a91e50"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-01T05:05:42.726Z"}},{"type":"Public","name":"safe-action-repetition","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-01T00:15:53.617Z"}},{"type":"Public","name":"CARL","owner":"Safe-RL-Team","isFork":false,"description":"Blog: Caution Parameters in \"Cautious Adaptation for Reinforcement Learning in Safety-Critical Settings\"","allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"The Unlicense","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T19:45:43.098Z"}},{"type":"Public","name":"barrier-certificates","owner":"Safe-RL-Team","isFork":false,"description":"Code for Barrier Certificates Blog: https://safe-rl-team.github.io/barrier-certificates/","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"The Unlicense","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T19:05:40.760Z"}},{"type":"Public","name":"barrier_certificates_code","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T18:52:40.042Z"}},{"type":"Public","name":"CARL-params","owner":"Safe-RL-Team","isFork":false,"description":"Code: Caution Parameters in \"Cautious Adaptation for Reinforcement Learning in Safety-Critical Settings\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T18:19:09.430Z"}},{"type":"Public","name":"Uncertainty-Based-Offline-RL-with-Diversified-Q-Ensemble","owner":"Safe-RL-Team","isFork":false,"description":"Blog post about Uncertainty-Based Offline Reinforcement Learning with Diversified Q-Ensemble","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T14:44:35.409Z"}},{"type":"Public","name":"CPO-Blog","owner":"Safe-RL-Team","isFork":false,"description":"Our main blog","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T13:48:34.014Z"}},{"type":"Public","name":"advice-distillation-code","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T13:04:12.598Z"}},{"type":"Public","name":"advice-distillation-blog","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-31T12:57:16.860Z"}},{"type":"Public","name":"SRL-NLC-Report","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"EJS","color":"#a91e50"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Creative Commons Attribution 4.0 International","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-29T20:59:01.292Z"}},{"type":"Public","name":"Blog-Post-about-There-is-No-Turning-Back","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-28T09:28:01.464Z"}},{"type":"Public","name":"NoTurningBack","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-28T09:27:46.849Z"}},{"type":"Public","name":"cpo","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-27T23:18:06.104Z"}},{"type":"Public","name":"RCPO","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-27T19:11:11.994Z"}},{"type":"Public","name":"SRL-NLC","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-26T21:04:30.129Z"}},{"type":"Public","name":"lambda-bo","owner":"Safe-RL-Team","isFork":false,"description":"Bayesian optimization hyperparameter optimization for LAMBDA","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-24T08:52:47.387Z"}},{"type":"Public","name":"Uncertainty-Based-Offline-RL-with-Diversified-Q-Ensemble-Implementation","owner":"Safe-RL-Team","isFork":false,"description":"This is a reimplementation of the EDAC algorithm in PyTorch. It was created as part of an University project and used for a blog post: https://github.com/Safe-RL-Team/Uncertainty-Based-Offline-RL-with-Diversified-Q-Ensemble","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-23T13:03:29.479Z"}},{"type":"Public","name":"adversarial-policies-pytorch-blog","owner":"Safe-RL-Team","isFork":false,"description":"Blog post for our implementation of the paper \"Adversarial Policies: Attacking Deep Reinforcement Learning\"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-22T10:54:36.894Z"}},{"type":"Public","name":"Adaptive-Reward-Penalty-in-Safe-Reinforcement-Learning","owner":"Safe-RL-Team","isFork":true,"description":"In this work we have implemented RCPO into PPO and recreated the results from the original paper. This Blog post summarises our work and elaborates on our ideas and findings.","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":40,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-11T07:53:13.052Z"}},{"type":"Public","name":"presentations","owner":"Safe-RL-Team","isFork":false,"description":"slides presenting state-of-art papers on safe reinforcement learning","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-07T20:13:43.710Z"}},{"type":"Public","name":"rl-from-human-preferences","owner":"Safe-RL-Team","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-03T14:44:51.419Z"}},{"type":"Public","name":"curriculum-learning-poster","owner":"Safe-RL-Team","isFork":false,"description":"Poster about Curriculum Induction for Safe Reinforcement Learning","allTopics":["reinforcement-learning","curriculum-learning","safe-reinforcement-learning","safe-rl"],"primaryLanguage":{"name":"TeX","color":"#3D6117"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-07-13T22:08:55.042Z"}}],"repositoryCount":32,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Safe-RL-Team repositories"}