# Load spaCy and the small English pipeline used throughout this script.
import spacy
from spacy import displacy
nlp = spacy.load("en_core_web_sm")
Natural Language Processing (NLP) is the field of Artificial Intelligence concerned with the processing and understanding of human language. Since its inception in the 1950s, machine understanding of language has played a pivotal role in translation, topic modeling, document indexing, information retrieval, and information extraction.
# !pip install scikit-learn
# !pip install -U spacy
# !python -m spacy download en
#!python -m spacy download en_core_web_sm
This is one of the simplest methods of embedding words into numerical vectors. It is not often used in practice due to its oversimplification of language, but it is often the first embedding technique taught in a classroom setting.
# Three tiny example "documents" used to illustrate word-embedding /
# count-vector ideas discussed in the surrounding text.
doc1 = "I am high"
doc2 = "Yes I am high"
doc3 = "I am kidding"
tf–idf stands for "Term Frequency times Inverse Document Frequency".
# Sentence-segmentation demo.
# Fix: restored the loop-body indentation lost in notebook extraction
# (the bare `print` lines were syntax errors as written).
# NOTE(review): nlp.create_pipe(...) / nlp.add_pipe(component) is the
# spaCy v2 API; spaCy v3 uses nlp.add_pipe('sentencizer', before='parser').
import spacy
from spacy import displacy

nlp = spacy.load('en_core_web_sm')

text = "Apple, This is first sentence. and Google this is another one. here 3rd one is"
doc = nlp(text)
doc

# Token-level view of the processed text.
for token in doc:
    print(token)

# Add a rule-based sentencizer ahead of the parser so sentence boundaries
# are assigned by punctuation rules before parsing.
sent = nlp.create_pipe('sentencizer')
nlp.add_pipe(sent, before='parser')

# Re-process and iterate over the detected sentences.
doc = nlp(text)
for sent in doc.sents:
    print(sent)
# Stop-word filtering demo using spaCy's built-in English stop-word list.
from spacy.lang.en.stop_words import STOP_WORDS

stopwords = list(STOP_WORDS)  # also reused later by text_data_cleaning()
print(stopwords)
len(stopwords)

# Print only the non-stop-word tokens of the current `doc`.
# Fixes: restored loop-body indentation lost in extraction, and replaced
# the non-idiomatic `token.is_stop == False` with a truthiness test.
for token in doc:
    if not token.is_stop:
        print(token)
# Lemmatization demo: each inflected surface form maps back to a lemma.
# Fix: restored the loop-body indentation lost in extraction.
doc = nlp('run runs running runner')
for lem in doc:
    print(lem.text, lem.lemma_)
# Part-of-speech tagging demo plus a dependency-parse visualization.
# Fix: restored the loop-body indentation lost in extraction.
doc = nlp('All is well at your end!')
for token in doc:
    print(token.text, token.pos_)
displacy.render(doc, style='dep')
# Named-entity recognition demo on a news paragraph; entities (places,
# dates, money, organizations, ...) are highlighted by displacy below.
doc = nlp("New York City on Tuesday declared a public health emergency and ordered mandatory measles vaccinations amid an outbreak, becoming the latest national flash point over refusals to inoculate against dangerous diseases. At least 285 people have contracted measles in the city since September, mostly in Brooklyn’s Williamsburg neighborhood. The order covers four Zip codes there, Mayor Bill de Blasio (D) said Tuesday. The mandate orders all unvaccinated people in the area, including a concentration of Orthodox Jews, to receive inoculations, including for children as young as 6 months old. Anyone who resists could be fined up to $1,000.")
doc
displacy.render(doc, style = 'ent')
# pandas / scikit-learn imports for the sentiment-classification section.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# Column labels shared by all three sentence/sentiment corpora below.
columns_name = ['Review', 'Sentiment']

# Yelp corpus: tab-separated text file with no header row.
data_yelp = pd.read_csv('datasets/yelp_labelled.txt', sep='\t', header=None)
data_yelp.head()
data_yelp.columns = columns_name
data_yelp.head()
data_yelp.shape
# Amazon corpus: identical tab-separated format; the column labels can be
# supplied directly at read time (equivalent to assigning .columns after).
data_amazon = pd.read_csv('datasets/amazon_cells_labelled.txt', sep='\t',
                          header=None, names=columns_name)
data_amazon.head()
data_amazon.shape
# IMDB corpus: same tab-separated, headerless format as the others.
data_imdb = pd.read_csv('datasets/imdb_labelled.txt', sep = '\t', header = None)
data_imdb.columns = columns_name
data_imdb.shape
data_imdb.head()
# Combine the three corpora into a single DataFrame.
# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with ignore_index=True produces the same result.
data = pd.concat([data_yelp, data_amazon, data_imdb], ignore_index=True)
data.shape
data.head()
data['Sentiment'].value_counts()  # class balance of the merged corpus
data.isnull().sum()               # confirm there are no missing values
# String of ASCII punctuation characters; used below to filter out
# punctuation tokens during text cleaning.
import string
punct = string.punctuation
punct
def text_data_cleaning(sentence):
    """Tokenize *sentence* with spaCy and return a list of cleaned tokens.

    Each token is lemmatized, lower-cased and stripped; pronouns keep their
    lower-cased surface form because spaCy v2 lemmatizes them to the
    placeholder "-PRON-". Stop words and single-character punctuation
    tokens are then dropped.

    Relies on the module-level ``nlp``, ``stopwords`` and ``punct``.
    Fix: restored the function-body indentation lost in extraction
    (the body was at top level, a syntax error as written).
    """
    doc = nlp(sentence)
    tokens = []
    for token in doc:
        if token.lemma_ != "-PRON-":
            temp = token.lemma_.lower().strip()
        else:
            temp = token.lower_
        tokens.append(temp)
    # Keep only content tokens: not a stop word, not punctuation.
    cleaned_tokens = [t for t in tokens
                      if t not in stopwords and t not in punct]
    return cleaned_tokens
# Quick sanity check of the cleaning function on a throwaway sentence.
text_data_cleaning(" Hello how are you. Like this video")
from sklearn.svm import LinearSVC

# TF-IDF features built on top of the custom spaCy tokenizer, fed into a
# linear support-vector classifier.
tfidf = TfidfVectorizer(tokenizer=text_data_cleaning)
classifier = LinearSVC()

# 80/20 train/test split with a fixed seed for reproducibility.
X = data['Review']
y = data['Sentiment']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
X_train.shape, X_test.shape

# Chain vectorizer and classifier into one estimator, then train it.
clf = Pipeline([('tfidf', tfidf), ('clf', classifier)])
clf.fit(X_train, y_train)
# Evaluate the trained pipeline on the held-out split.
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
confusion_matrix(y_test, y_pred)
# Spot-check predictions on unseen sentences
# (presumably 1 = positive, 0 = negative — verify against dataset labels).
clf.predict(['Wow, this is amzing lesson'])
clf.predict(['Wow, this sucks'])
clf.predict(['Worth of watching it. Please like it'])
clf.predict(['Loved it. amazing'])