{"payload":{"header_redesign_enabled":false,"results":[{"id":"125381318","archived":false,"color":"#3572A5","followers":4562,"has_funding_file":false,"hl_name":"Trusted-AI/adversarial-robustness-toolbox","hl_trunc_description":"Adversarial Robustness Toolbox (ART) - Python Library for Machine Learning Security - Evasion, Poisoning, Extraction, Inference - Red and…","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":125381318,"name":"adversarial-robustness-toolbox","owner_id":56103733,"owner_login":"Trusted-AI","updated_at":"2024-06-07T22:13:31.432Z","has_issues":true}},"sponsorable":false,"topics":["python","machine-learning","privacy","ai","attack","extraction","inference","artificial-intelligence","evasion","red-team","poisoning","adversarial-machine-learning","blue-team","adversarial-examples","adversarial-attacks","trusted-ai","trustworthy-ai"],"type":"Public","help_wanted_issues_count":1,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":89,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253ATrusted-AI%252Fadversarial-robustness-toolbox%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/Trusted-AI/adversarial-robustness-toolbox/star":{"post":"_5cWV8EclqLPNtyZE6vucOYJrYzAUKeXsQVv1sCLsGHr4I4mOQCIsZHY6J3ZhDdJK66dA_864AoYY-IBX2Kp_A"},"/Trusted-AI/adversarial-robustness-toolbox/unstar":{"post":"Sbx6Df50SesHSOX-WJw4sMgiKqxAiqpiCakRy4u2xt5Hsk36ILYj4pg9-v_1-Wgwb-8jM6EyB_GJNATZ81rVrw"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"W8MXVaMbV1JwZQm7iGY7rQLwoFnU0qC2f4dAQ2mKyPewPU8BRSvVx37p-_YiSUUVoC5Ut0nBU16cCdZhvrms1Q"}}},"title":"Repository search results"}