Official introduction: OpenAttack, a toolkit for textual adversarial attacks
Project URL: https://github.com/thunlp/OpenAttack

```bash
git clone https://github.com/thunlp/OpenAttack.git
cd OpenAttack
python setup.py install
```
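If building from source is not required, the toolkit is also published on PyPI, so the released package can likely be installed directly:

```bash
pip install OpenAttack
```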
- OpenAttack also depends on NLTK data; run `import nltk; nltk.download()` to open the interactive downloader (a targeted, non-interactive alternative is sketched below).
- If the downloader cannot reach the NLTK server, the packages can be fetched manually from https://github.com/nltk/nltk_data/tree/gh-pages/packages
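For the demo below, only the VADER lexicon is actually needed. Assuming your environment has network access, it can be fetched non-interactively (the demo script also downloads it lazily if it is missing):

```python
import nltk

# Download only the VADER lexicon used by SentimentIntensityAnalyzer.
nltk.download('vader_lexicon', quiet=True)
```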

A complete demo script: wrap NLTK's VADER sentiment analyzer as the victim classifier and attack it with PWWS under a query budget.

```python
import OpenAttack
import nltk
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer


def make_model():
    """Wrap the rule-based VADER sentiment analyzer as an OpenAttack classifier."""

    class MyClassifier(OpenAttack.Classifier):
        def __init__(self):
            try:
                self.model = SentimentIntensityAnalyzer()
            except LookupError:
                # The VADER lexicon is missing; download it and retry.
                nltk.download('vader_lexicon')
                self.model = SentimentIntensityAnalyzer()

        def get_prob(self, input_):
            # Return [P(negative), P(positive)] for every input sentence.
            ret = []
            for sent in input_:
                res = self.model.polarity_scores(sent)
                # Smoothed positive probability from VADER's pos/neg scores.
                prob = (res["pos"] + 1e-6) / (res["neg"] + res["pos"] + 1e-6)
                ret.append(np.array([1 - prob, prob]))
            return np.array(ret)

    return MyClassifier()


def main():
    print("New Attacker")
    attacker = OpenAttack.attackers.PWWSAttacker()

    print("Build model")
    clsf = make_model()

    # 1000 sentences sampled from SST, bundled with OpenAttack.
    dataset = OpenAttack.DataManager.loadDataset("SST.sample")[:1000]

    print("Start attack")
    options = {
        "success_rate": True,        # report attack success rate
        "fluency": False,
        "mistake": False,
        "semantic": False,
        "levenstein": True,          # report Levenshtein edit distance
        "word_distance": False,
        "modification_rate": True,   # report fraction of modified words
        "running_time": True,
        "invoke_limit": 500,         # cap victim-model queries per sample
        "average_invoke": True,      # report average number of queries
    }
    attack_eval = OpenAttack.attack_evals.InvokeLimitedAttackEval(attacker, clsf, **options)
    attack_eval.eval(dataset, visualize=True)


if __name__ == "__main__":
    main()
```
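The only contract a victim model must satisfy is the `OpenAttack.Classifier` interface used above: `get_prob` takes a batch of sentences and returns one probability per class for each of them. As a minimal sketch of that contract (the keyword lists and class name here are illustrative, not part of OpenAttack), any scoring rule can be wrapped the same way:

```python
import numpy as np
import OpenAttack


class KeywordClassifier(OpenAttack.Classifier):
    """Toy victim model that scores sentiment by counting cue words.

    Illustrative only: the word lists are arbitrary and the model is
    deliberately weak, which makes it an easy target for word-level attacks.
    """

    POS_WORDS = {"good", "great", "excellent", "wonderful"}
    NEG_WORDS = {"bad", "terrible", "awful", "boring"}

    def get_prob(self, input_):
        probs = []
        for sent in input_:
            tokens = sent.lower().split()
            pos = sum(t in self.POS_WORDS for t in tokens)
            neg = sum(t in self.NEG_WORDS for t in tokens)
            # Smoothed positive probability; 0.5 when no cue words appear.
            p_pos = (pos + 1e-6) / (pos + neg + 2e-6)
            probs.append([1.0 - p_pos, p_pos])
        return np.array(probs)


# Such a classifier drops into the evaluation loop unchanged, e.g.:
#   attack_eval = OpenAttack.attack_evals.InvokeLimitedAttackEval(
#       OpenAttack.attackers.PWWSAttacker(), KeywordClassifier(),
#       success_rate=True, invoke_limit=500)
```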








