# Build a TF-IDF vectorizer whose stop-word list covers both French and English,
# using spaCy's built-in stop-word sets.
from sklearn.feature_extraction.text import TfidfVectorizer  # was used below but never imported
from spacy.lang.fr.stop_words import STOP_WORDS as fr_stop
from spacy.lang.en.stop_words import STOP_WORDS as en_stop

# TfidfVectorizer requires stop_words as a list, not a (frozen)set.
final_stopwords_list = list(fr_stop) + list(en_stop)

# BUG FIX: `ngram_range(1,3)` was a SyntaxError (positional argument after
# keyword arguments); it must be the keyword argument `ngram_range=(1, 3)`.
# NOTE(review): `tokenize_and_stem` must be defined elsewhere in the project — confirm.
tfidf_vectorizer = TfidfVectorizer(
    max_df=0.8,
    max_features=200000,
    min_df=0.2,
    stop_words=final_stopwords_list,
    use_idf=True,
    tokenizer=tokenize_and_stem,
    ngram_range=(1, 3),
)
# Build a TF-IDF vectorizer whose stop-word list covers both French and English,
# using spaCy's built-in stop-word sets. (Duplicate of the snippet above.)
from sklearn.feature_extraction.text import TfidfVectorizer  # was used below but never imported
from spacy.lang.fr.stop_words import STOP_WORDS as fr_stop
from spacy.lang.en.stop_words import STOP_WORDS as en_stop

# TfidfVectorizer requires stop_words as a list, not a (frozen)set.
final_stopwords_list = list(fr_stop) + list(en_stop)

# BUG FIX: `ngram_range(1,3)` was a SyntaxError (positional argument after
# keyword arguments); it must be the keyword argument `ngram_range=(1, 3)`.
# NOTE(review): `tokenize_and_stem` must be defined elsewhere in the project — confirm.
tfidf_vectorizer = TfidfVectorizer(
    max_df=0.8,
    max_features=200000,
    min_df=0.2,
    stop_words=final_stopwords_list,
    use_idf=True,
    tokenizer=tokenize_and_stem,
    ngram_range=(1, 3),
)
# Build a TF-IDF vectorizer whose stop-word list covers both English and French,
# using NLTK's stopwords corpus (requires `nltk.download('stopwords')` to have run).
from sklearn.feature_extraction.text import TfidfVectorizer  # was used below but never imported
from nltk.corpus import stopwords

# stopwords.words(...) already returns a list, so the two languages concatenate directly.
final_stopwords_list = stopwords.words('english') + stopwords.words('french')

# BUG FIX: `ngram_range(1,3)` was a SyntaxError (positional argument after
# keyword arguments); it must be the keyword argument `ngram_range=(1, 3)`.
# NOTE(review): `tokenize_and_stem` must be defined elsewhere in the project — confirm.
tfidf_vectorizer = TfidfVectorizer(
    max_df=0.8,
    max_features=200000,
    min_df=0.2,
    stop_words=final_stopwords_list,
    use_idf=True,
    tokenizer=tokenize_and_stem,
    ngram_range=(1, 3),
)
# Build a TF-IDF vectorizer whose stop-word list covers both English and French,
# using NLTK's stopwords corpus. (Duplicate of the snippet above.)
from sklearn.feature_extraction.text import TfidfVectorizer  # was used below but never imported
from nltk.corpus import stopwords

# stopwords.words(...) already returns a list, so the two languages concatenate directly.
final_stopwords_list = stopwords.words('english') + stopwords.words('french')

# BUG FIX: `ngram_range(1,3)` was a SyntaxError (positional argument after
# keyword arguments); it must be the keyword argument `ngram_range=(1, 3)`.
# NOTE(review): `tokenize_and_stem` must be defined elsewhere in the project — confirm.
tfidf_vectorizer = TfidfVectorizer(
    max_df=0.8,
    max_features=200000,
    min_df=0.2,
    stop_words=final_stopwords_list,
    use_idf=True,
    tokenizer=tokenize_and_stem,
    ngram_range=(1, 3),
)