Quora Question Pairs using BERT
Task: Identify whether two questions have similar context/meaning or not.
Quora Question Pairs using BERT: Overview
Dataset: Quora Question Pairs (Kaggle)
I have tried this problem using two different approaches:
- Using a Naive Bayes classifier
- Using BERT
Naive Bayes Classifier
import pandas as pd
import numpy as np
import os
import nltk
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
path = "/content/drive/MyDrive/Quora Questions NLP"
nltk.download('stopwords')  # required for stopword removal
nltk.download('wordnet')    # required for lemmatization
train = pd.read_csv(path+"/train.csv")
print("Total samples:",len(train))
train.head(10)
print(train.isnull().sum(axis=0))  # check for null values
train.dropna(axis=0, inplace=True)  # drop rows with null values
def preprocess(series):
    # Remove characters other than alphabets & numerics, lowercase, and split into words
    words = re.sub("[^A-Za-z0-9]", " ", series).lower().split()
    # Lemmatize words and drop English stopwords
    lemm = WordNetLemmatizer()
    stpwords = stopwords.words('english')
    lemmatized = [lemm.lemmatize(word) for word in words if word not in stpwords]
    sent = ' '.join(lemmatized)
    return sent
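A quick illustrative check of the cleaning step (the question below is made up; the output assumes the NLTK resources downloaded above):
print(preprocess("What is the best way to learn Machine Learning?"))
# -> best way learn machine learning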
train['question1'] = train['question1'].apply(preprocess)  # apply preprocessing
train['question2'] = train['question2'].apply(preprocess)
# Concatenate question1 and question2 into a single text field
train['combine'] = train.apply(lambda ser: ser['question1'] + " " + ser['question2'], axis=1)
train.head(10)
cv = TfidfVectorizer(max_features=50000)  # convert text to vectors using TF-IDF
# Use the combined question text as X
X = cv.fit_transform(train['combine'])
y = np.array(train['is_duplicate'])
print(X.shape)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.05)
print(X_train.shape,X_test.shape)
naive_model = MultinomialNB()#Training
naive_model.fit(X_train,y_train)
#Predictions
y_pred_train = naive_model.predict(X_train)
y_pred_test = naive_model.predict(X_test)
accuracy_train = sum((y_pred_train == y_train).astype(int))/len(y_train)
accuracy_test = sum((y_pred_test == y_test).astype(int))/len(y_test)
print(accuracy_train,accuracy_test)
We got only about 74% accuracy, which is poor for this binary classification problem.
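For context, a trivial classifier that always predicts the majority class (most Quora pairs are not duplicates) is not far behind. A minimal sanity check using the arrays defined above:
# Baseline: always predict the most frequent class from the training split
majority_class = int(np.mean(y_train) >= 0.5)
baseline_acc = np.mean(y_test == majority_class)
print(f"Majority-class baseline accuracy: {baseline_acc:.4f}")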
BERT
I have adapted the "Semantic Similarity with BERT" Keras example to solve this problem.
reference : https://keras.io/examples/nlp/semantic_similarity_with_bert/
import numpy as np
import pandas as pd
import tensorflow as tf
!pip install transformers==2.11.0
import transformers
max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset; the order matches the one-hot columns produced below.
labels = [0, 1]
# 0 : not duplicate
# 1 : duplicate
df = pd.read_csv(path+"/train.csv")
print(df.isnull().sum(axis=0))  # check for null values before dropping them
df.dropna(axis=0,inplace=True)
# Use roughly 70% of the samples for training
mask = np.random.rand(len(df)) < 0.7
train_df = df[mask]
not_train = df[~mask]
# Split the remaining ~30% evenly into test and validation sets
mask = np.random.rand(len(not_train)) < 0.5
test_df = not_train[mask]
val_df = not_train[~mask]
val_df.head(5)
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {val_df.shape[0]}")
print(f"Total test samples: {test_df.shape[0]}")
print("Train Target Distribution")
print(train_df.is_duplicate.value_counts())
print("Validation Target Distribution")
print(val_df.is_duplicate.value_counts())
y_train = tf.keras.utils.to_categorical(train_df.is_duplicate, num_classes=2)  # one-hot encoded labels
print(f"y_train.shape:{y_train.shape}")
y_val = tf.keras.utils.to_categorical(val_df.is_duplicate, num_classes=2)
print(f"y_val.shape:{y_val.shape}")
y_test = tf.keras.utils.to_categorical(test_df.is_duplicate, num_classes=2)
print(f"y_test.shape:{y_test.shape}")
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
    """Generates batches of data.

    Args:
        sentence_pairs: Array of question1/question2 input sentence pairs.
        labels: Array of labels.
        batch_size: Integer batch size.
        shuffle: Boolean, whether to shuffle the data.
        include_targets: Boolean, whether to include the labels.

    Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
        if `include_targets=False`).
    """

    def __init__(
        self,
        sentence_pairs,
        labels,
        batch_size=batch_size,
        shuffle=True,
        include_targets=True,
    ):
        self.sentence_pairs = sentence_pairs
        self.labels = labels
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.include_targets = include_targets
        # Load our BERT tokenizer to encode the text.
        # We will use the bert-base-uncased pretrained model.
        self.tokenizer = transformers.BertTokenizer.from_pretrained(
            "bert-base-uncased", do_lower_case=True
        )
        self.indexes = np.arange(len(self.sentence_pairs))
        self.on_epoch_end()

    def __len__(self):
        # Denotes the number of batches per epoch.
        return len(self.sentence_pairs) // self.batch_size

    def __getitem__(self, idx):
        # Retrieves the batch at position `idx`.
        indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
        sentence_pairs = self.sentence_pairs[indexes]
        # With the BERT tokenizer's batch_encode_plus, both sentences in a pair
        # are encoded together, separated by the [SEP] token.
        encoded = self.tokenizer.batch_encode_plus(
            sentence_pairs.tolist(),
            add_special_tokens=True,
            max_length=max_length,
            return_attention_mask=True,
            return_token_type_ids=True,
            pad_to_max_length=True,
            return_tensors="tf",
        )
        # Convert batch of encoded features to numpy arrays.
        input_ids = np.array(encoded["input_ids"], dtype="int32")
        attention_masks = np.array(encoded["attention_mask"], dtype="int32")
        token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
        # Targets are included when the generator is used for training/validation.
        if self.include_targets:
            labels = np.array(self.labels[indexes], dtype="int32")
            return [input_ids, attention_masks, token_type_ids], labels
        else:
            return [input_ids, attention_masks, token_type_ids]

    def on_epoch_end(self):
        # Shuffle indexes after each epoch if shuffle is set to True.
        if self.shuffle:
            np.random.RandomState(42).shuffle(self.indexes)
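To see what the generator actually feeds the model, here is a quick illustrative check of how the tokenizer packs a question pair into one sequence (the questions are made up):
tok = transformers.BertTokenizer.from_pretrained("bert-base-uncased")
enc = tok.encode_plus("How do I learn Python?", "What is the best way to learn Python?")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# -> ['[CLS]', ...question 1 tokens..., '[SEP]', ...question 2 tokens..., '[SEP]']
print(enc["token_type_ids"])
# -> 0s for the first question, 1s for the second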
# Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    # Encoded token ids from the BERT tokenizer.
    input_ids = tf.keras.layers.Input(
        shape=(max_length,), dtype=tf.int32, name="input_ids"
    )
    # Attention masks indicate to the model which tokens should be attended to.
    attention_masks = tf.keras.layers.Input(
        shape=(max_length,), dtype=tf.int32, name="attention_masks"
    )
    # Token type ids are binary masks identifying the two sequences in the input.
    token_type_ids = tf.keras.layers.Input(
        shape=(max_length,), dtype=tf.int32, name="token_type_ids"
    )
    # Loading pretrained BERT model.
    bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
    # Freeze the BERT model to reuse the pretrained features without modifying them.
    bert_model.trainable = False
    sequence_output, pooled_output = bert_model(
        input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
    )
    # Add trainable layers on top of the frozen layers to adapt the pretrained features to the new data.
    bi_lstm = tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM(64, return_sequences=True)
    )(sequence_output)
    # Applying a hybrid pooling approach to the bi_lstm sequence output.
    avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
    max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
    concat = tf.keras.layers.concatenate([avg_pool, max_pool])
    dropout = tf.keras.layers.Dropout(0.3)(concat)
    output = tf.keras.layers.Dense(2, activation="softmax")(dropout)
    model = tf.keras.models.Model(
        inputs=[input_ids, attention_masks, token_type_ids], outputs=output
    )
    model.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss="categorical_crossentropy",
        metrics=["acc"],
    )
print(f"Strategy: {strategy}")
model.summary()
train_data = BertSemanticDataGenerator(
train_df[["question1", "question2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
val_data = BertSemanticDataGenerator(
val_df[["question1", "question2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)
history = model.fit(
train_data,
validation_data=val_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
# Unfreeze the BERT model and fine-tune it end-to-end at a low learning rate.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
history = model.fit(
train_data,
validation_data=val_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
After waiting 4-5 hours, our model is finally trained!
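Since fine-tuning takes this long, it is worth checkpointing the result; a minimal sketch, assuming the Drive path defined earlier is still mounted and writable (the checkpoint name is arbitrary):
# Save the fine-tuned weights so the long run does not have to be repeated
model.save_weights(path + "/bert_quora_checkpoint")
# Later: rebuild the same architecture and call model.load_weights(path + "/bert_quora_checkpoint")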
test_data = BertSemanticDataGenerator(
test_df[["question1", "question2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)
We got ~90% accuracy on the test dataset, which is far better than Naive Bayes.
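Because the classes are imbalanced, accuracy alone can be misleading, so it may be worth looking at per-class precision and recall as well. A minimal sketch (test_data is not shuffled and the generator drops the last partial batch, hence the slicing):
from sklearn.metrics import classification_report
probs = model.predict(test_data, verbose=1)
y_pred = np.argmax(probs, axis=1)
y_true = np.argmax(y_test[:len(y_pred)], axis=1)  # generator yields full batches only
print(classification_report(y_true, y_pred, target_names=["not duplicate", "duplicate"]))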
def check_similarity(sentence1, sentence2):
    sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
    test_data = BertSemanticDataGenerator(
        sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
    )
    proba = model.predict(test_data)[0]
    idx = np.argmax(proba)
    proba = f"{100 * proba[idx]:.2f}%"  # confidence of the predicted class
    pred = labels[idx]
    return pred, proba
ind = np.random.randint(0,500)
#Duplicate Questions
q1 = test_df[test_df["is_duplicate"] == 1].iloc[ind]['question1']
q2 = test_df[test_df["is_duplicate"] == 1].iloc[ind]['question2']
print(q1+"\n"+q2)
check_similarity(q1,q2)
ind = np.random.randint(0,500)
#Non-Duplicate Questions
q1 = test_df[test_df["is_duplicate"] == 0].iloc[ind]['question1']
q2 = test_df[test_df["is_duplicate"] == 0].iloc[ind]['question2']
print(q1+"\n"+q2)
check_similarity(q1,q2)
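The same helper works on arbitrary question pairs; an illustrative call with made-up questions (with the label convention above, 1 means duplicate and 0 means not duplicate):
check_similarity("How can I improve my English speaking?",
                 "What is the best way to get better at speaking English?")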