Commit e76d436b authored by vikrantRajan's avatar vikrantRajan

Updates

parent b1d598b1
......@@ -2,7 +2,7 @@ import cv2
# LOAD THE FACE DETECTION CASCADE FILE & CONVERT THE IMAGE TO GRAYSCALE BECAUSE THE DETECTOR WORKS ON GRAYSCALE IMAGES
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img = cv2.imread("news.jpg")
img = cv2.imread("photo.jpg")
gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)
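A hedged sketch (not part of this commit) of how the detected faces might then be drawn and displayed, reusing the variables from the snippet above:
``` python
# Draw a green rectangle around each detected face and show the result
for x, y, w, h in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)

cv2.imshow("Face Detection", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```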
......
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Bokeh Plot</title>
<script type="text/javascript" src="https://cdn.pydata.org/bokeh/release/bokeh-1.4.0.min.js"></script>
<script type="text/javascript">
Bokeh.set_log_level("info");
</script>
</head>
<body>
<div class="bk-root" id="5e9d78fb-9a2a-4b86-b20c-6ce21eca0ad0" data-root-id="1002"></div>
<script type="application/json" id="1283">
{"a8fc4c9f-f4f3-4377-a589-311aa64a644b":{"roots":{"references":[{"attributes":{"bottom_units":"screen","fill_alpha":{"value":0.5},"fill_color":{"value":"lightgrey"},"left_units":"screen","level":"overlay","line_alpha":{"value":1.0},"line_color":{"value":"black"},"line_dash":[4,4],"line_width":{"value":2},"render_mode":"css","right_units":"screen","top_units":"screen"},"id":"1048","type":"BoxAnnotation"},{"attributes":{"formatter":{"id":"1045","type":"BasicTickFormatter"},"minor_tick_line_color":{"value":null},"ticker":{"id":"1019","type":"BasicTicker"}},"id":"1018","type":"LinearAxis"},{"attributes":{"below":[{"id":"1013","type":"DatetimeAxis"}],"center":[{"id":"1017","type":"Grid"},{"id":"1022","type":"Grid"}],"left":[{"id":"1018","type":"LinearAxis"}],"plot_height":300,"plot_width":1000,"renderers":[{"id":"1041","type":"GlyphRenderer"}],"title":{"id":"1003","type":"Title"},"toolbar":{"id":"1029","type":"Toolbar"},"x_range":{"id":"1005","type":"DataRange1d"},"x_scale":{"id":"1009","type":"LinearScale"},"y_range":{"id":"1007","type":"DataRange1d"},"y_scale":{"id":"1011","type":"LinearScale"}},"id":"1002","subtype":"Figure","type":"Plot"},{"attributes":{"callback":null},"id":"1005","type":"DataRange1d"},{"attributes":{},"id":"1045","type":"BasicTickFormatter"},{"attributes":{"num_minor_ticks":5,"tickers":[{"id":"1049","type":"AdaptiveTicker"},{"id":"1050","type":"AdaptiveTicker"},{"id":"1051","type":"AdaptiveTicker"},{"id":"1052","type":"DaysTicker"},{"id":"1053","type":"DaysTicker"},{"id":"1054","type":"DaysTicker"},{"id":"1055","type":"DaysTicker"},{"id":"1056","type":"MonthsTicker"},{"id":"1057","type":"MonthsTicker"},{"id":"1058","type":"MonthsTicker"},{"id":"1059","type":"MonthsTicker"},{"id":"1060","type":"YearsTicker"}]},"id":"1014","type":"DatetimeTicker"},{"attributes":{"ticker":{"id":"1014","type":"DatetimeTicker"}},"id":"1017","type":"Grid"},{"attributes":{"desired_num_ticks":1},"id":"1019","type":"BasicTicker"},{"attributes":{"dimension":1,"ticker":{"id":"1019","type":"BasicTicker"}},"id":"1022","type":"Grid"},{"attributes":{},"id":"1011","type":"LinearScale"},{"attributes":{"mantissas":[1,2,5],"max_interval":500.0,"num_minor_ticks":0},"id":"1049","type":"AdaptiveTicker"},{"attributes":{"overlay":{"id":"1048","type":"BoxAnnotation"}},"id":"1025","type":"BoxZoomTool"},{"attributes":{"bottom":{"value":0},"fill_color":{"value":"green"},"left":{"field":"Start"},"line_color":{"value":"green"},"right":{"field":"End"},"top":{"value":1}},"id":"1039","type":"Quad"},{"attributes":{"base":60,"mantissas":[1,2,5,10,15,20,30],"max_interval":1800000.0,"min_interval":1000.0,"num_minor_ticks":0},"id":"1050","type":"AdaptiveTicker"},{"attributes":{"days":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]},"id":"1052","type":"DaysTicker"},{"attributes":{"formatter":{"id":"1047","type":"DatetimeTickFormatter"},"ticker":{"id":"1014","type":"DatetimeTicker"}},"id":"1013","type":"DatetimeAxis"},{"attributes":{},"id":"1028","type":"HelpTool"},{"attributes":{"base":24,"mantissas":[1,2,4,6,8,12],"max_interval":43200000.0,"min_interval":3600000.0,"num_minor_ticks":0},"id":"1051","type":"AdaptiveTicker"},{"attributes":{"active_drag":"auto","active_inspect":"auto","active_multi":null,"active_scroll":"auto","active_tap":"auto","tools":[{"id":"1023","type":"PanTool"},{"id":"1024","type":"WheelZoomTool"},{"id":"1025","type":"BoxZoomTool"},{"id":"1026","type":"SaveTool"},{"id":"1027","type":"ResetTool"},{"id":"1028","type":"HelpTool"},{"id":"1036","type":"HoverTool"}]},"id":"10
29","type":"Toolbar"},{"attributes":{},"id":"1009","type":"LinearScale"},{"attributes":{"months":[0,1,2,3,4,5,6,7,8,9,10,11]},"id":"1056","type":"MonthsTicker"},{"attributes":{},"id":"1023","type":"PanTool"},{"attributes":{"months":[0,2,4,6,8,10]},"id":"1057","type":"MonthsTicker"},{"attributes":{"callback":null},"id":"1007","type":"DataRange1d"},{"attributes":{},"id":"1024","type":"WheelZoomTool"},{"attributes":{"days":[1,4,7,10,13,16,19,22,25,28]},"id":"1053","type":"DaysTicker"},{"attributes":{"days":[1,15]},"id":"1055","type":"DaysTicker"},{"attributes":{"callback":null,"tooltips":[["Start","@Start_string"],["End","@End_string"]]},"id":"1036","type":"HoverTool"},{"attributes":{},"id":"1026","type":"SaveTool"},{"attributes":{"months":[0,6]},"id":"1059","type":"MonthsTicker"},{"attributes":{},"id":"1027","type":"ResetTool"},{"attributes":{"days":[1,8,15,22]},"id":"1054","type":"DaysTicker"},{"attributes":{},"id":"1060","type":"YearsTicker"},{"attributes":{},"id":"1047","type":"DatetimeTickFormatter"},{"attributes":{"source":{"id":"1001","type":"ColumnDataSource"}},"id":"1042","type":"CDSView"},{"attributes":{"months":[0,4,8]},"id":"1058","type":"MonthsTicker"},{"attributes":{"bottom":{"value":0},"fill_alpha":{"value":0.1},"fill_color":{"value":"#1f77b4"},"left":{"field":"Start"},"line_alpha":{"value":0.1},"line_color":{"value":"#1f77b4"},"right":{"field":"End"},"top":{"value":1}},"id":"1040","type":"Quad"},{"attributes":{"data_source":{"id":"1001","type":"ColumnDataSource"},"glyph":{"id":"1039","type":"Quad"},"hover_glyph":null,"muted_glyph":null,"nonselection_glyph":{"id":"1040","type":"Quad"},"selection_glyph":null,"view":{"id":"1042","type":"CDSView"}},"id":"1041","type":"GlyphRenderer"},{"attributes":{},"id":"1062","type":"UnionRenderers"},{"attributes":{},"id":"1061","type":"Selection"},{"attributes":{"callback":null,"data":{"End":{"__ndarray__":"OVTKtcQAd0I=","dtype":"float64","shape":[1]},"End_string":["2020-02-03 18:23:50"],"Start":{"__ndarray__":"GV6CrcQAd0I=","dtype":"float64","shape":[1]},"Start_string":["2020-02-03 18:23:16"],"index":[0]},"selected":{"id":"1061","type":"Selection"},"selection_policy":{"id":"1062","type":"UnionRenderers"}},"id":"1001","type":"ColumnDataSource"},{"attributes":{"text":"Motion Graph"},"id":"1003","type":"Title"}],"root_ids":["1002"]},"title":"Bokeh Application","version":"1.4.0"}}
</script>
<script type="text/javascript">
(function() {
var fn = function() {
Bokeh.safely(function() {
(function(root) {
function embed_document(root) {
var docs_json = document.getElementById('1283').textContent;
var render_items = [{"docid":"a8fc4c9f-f4f3-4377-a589-311aa64a644b","roots":{"1002":"5e9d78fb-9a2a-4b86-b20c-6ce21eca0ad0"}}];
root.Bokeh.embed.embed_items(docs_json, render_items);
}
if (root.Bokeh !== undefined) {
embed_document(root);
} else {
var attempts = 0;
var timer = setInterval(function(root) {
if (root.Bokeh !== undefined) {
clearInterval(timer);
embed_document(root);
} else {
attempts++;
if (attempts > 100) {
clearInterval(timer);
console.log("Bokeh: ERROR: Unable to run BokehJS code because BokehJS library is missing");
}
}
}, 10, root)
}
})(window);
});
};
if (document.readyState != "loading") fn();
else document.addEventListener("DOMContentLoaded", fn);
})();
</script>
</body>
</html>
\ No newline at end of file
,Start,End
0,2019-12-24 19:35:55.136794,2019-12-24 19:35:58.152620
1,2019-12-24 19:36:08.084905,2019-12-24 19:36:14.872890
2,2019-12-24 19:36:17.911238,2019-12-24 19:36:20.188220
0,2019-12-27 12:34:18.106721,2019-12-27 12:34:21.697614
0,2020-02-03 18:23:16.517881,2020-02-03 18:23:50.437264
......@@ -17,5 +17,5 @@ p.add_tools(hover)
q = p.quad(left="Start", right = "End", bottom = 0, top = 1, color = "green", source = cds)
output_file("Graph1.html")
output_file("Graph2.html")
show(p)
\ No newline at end of file
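For context, here is a hedged sketch of what the full plotting script around this hunk might look like. The input filename ("Times.csv") and the string columns for the hover tooltips are assumptions based on the CSV data and the generated HTML shown above, not part of this commit:
``` python
# Sketch of a Bokeh "Motion Graph" plotting Start/End intervals (assumptions noted above)
import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.models import HoverTool, ColumnDataSource

df = pd.read_csv("Times.csv", index_col=0, parse_dates=["Start", "End"])

# String versions of the timestamps for the hover tooltips
df["Start_string"] = df["Start"].dt.strftime("%Y-%m-%d %H:%M:%S")
df["End_string"] = df["End"].dt.strftime("%Y-%m-%d %H:%M:%S")
cds = ColumnDataSource(df)

p = figure(x_axis_type="datetime", plot_height=300, plot_width=1000, title="Motion Graph")
p.yaxis.minor_tick_line_color = None
p.yaxis[0].ticker.desired_num_ticks = 1

hover = HoverTool(tooltips=[("Start", "@Start_string"), ("End", "@End_string")])
p.add_tools(hover)

q = p.quad(left="Start", right="End", bottom=0, top=1, color="green", source=cds)

output_file("Graph2.html")
show(p)
```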
,Start,End
0,2019-12-24 19:35:55.136794,2019-12-24 19:35:58.152620
1,2019-12-24 19:36:08.084905,2019-12-24 19:36:14.872890
2,2019-12-24 19:36:17.911238,2019-12-24 19:36:20.188220
0,2020-02-03 18:27:13.813472,2020-02-03 18:27:30.652769
%% Cell type:markdown id: tags:
# Data Cleaning
%% Cell type:markdown id: tags:
## Introduction
%% Cell type:markdown id: tags:
This notebook goes through a necessary step of any data science project - data cleaning. Data cleaning is a time-consuming and unenjoyable task, yet it's a very important one. Keep in mind, "garbage in, garbage out": feeding dirty data into a model will give us meaningless results.
Specifically, we'll be walking through:
1. **Getting the data** - in this case, we'll be scraping data from a website
2. **Cleaning the data** - we will walk through popular text pre-processing techniques
3. **Organizing the data** - we will organize the cleaned data in a way that is easy to feed into other algorithms
The output of this notebook will be clean, organized data in two standard text formats:
1. **Corpus** - a collection of text
2. **Document-Term Matrix** - word counts in matrix format
%% Cell type:markdown id: tags:
## Problem Statement
%% Cell type:markdown id: tags:
As a reminder, our goal is to look at transcripts of various comedians and note their similarities and differences. Specifically, I'd like to know if Ali Wong's comedy style is different from other comedians', since she's the comedian who got me interested in stand-up comedy.
%% Cell type:markdown id: tags:
## Getting The Data
%% Cell type:markdown id: tags:
Luckily, there are wonderful people online who keep track of stand-up routine transcripts. [Scraps From The Loft](http://scrapsfromtheloft.com) makes them available for non-profit and educational purposes.
To decide which comedians to look into, I went on IMDB and looked specifically at comedy specials that were released in the past 5 years. To narrow it down further, I looked only at those with greater than a 7.5/10 rating and more than 2000 votes. If a comedian had multiple specials that fit those requirements, I would pick the most highly rated one. I ended up with a dozen comedy specials.
%% Cell type:code id: tags:
``` python
# Web scraping, pickle imports
import requests
from bs4 import BeautifulSoup
import pickle
# Scrapes transcript data from scrapsfromtheloft.com
def url_to_transcript(url):
    '''Returns transcript data specifically from scrapsfromtheloft.com.'''
    page = requests.get(url).text
    soup = BeautifulSoup(page, "lxml")
    text = [p.text for p in soup.find(class_="post-content").find_all('p')]
    print(url)
    return text
# URLs of transcripts in scope
urls = ['http://scrapsfromtheloft.com/2017/05/06/louis-ck-oh-my-god-full-transcript/',
'http://scrapsfromtheloft.com/2017/04/11/dave-chappelle-age-spin-2017-full-transcript/',
'http://scrapsfromtheloft.com/2018/03/15/ricky-gervais-humanity-transcript/',
'http://scrapsfromtheloft.com/2017/08/07/bo-burnham-2013-full-transcript/',
'http://scrapsfromtheloft.com/2017/05/24/bill-burr-im-sorry-feel-way-2014-full-transcript/',
'http://scrapsfromtheloft.com/2017/04/21/jim-jefferies-bare-2014-full-transcript/',
'http://scrapsfromtheloft.com/2017/08/02/john-mulaney-comeback-kid-2015-full-transcript/',
'http://scrapsfromtheloft.com/2017/10/21/hasan-minhaj-homecoming-king-2017-full-transcript/',
'http://scrapsfromtheloft.com/2017/09/19/ali-wong-baby-cobra-2016-full-transcript/',
'http://scrapsfromtheloft.com/2017/08/03/anthony-jeselnik-thoughts-prayers-2015-full-transcript/',
'http://scrapsfromtheloft.com/2018/03/03/mike-birbiglia-my-girlfriends-boyfriend-2013-full-transcript/',
'http://scrapsfromtheloft.com/2017/08/19/joe-rogan-triggered-2016-full-transcript/']
# Comedian names
comedians = ['louis', 'dave', 'ricky', 'bo', 'bill', 'jim', 'john', 'hasan', 'ali', 'anthony', 'mike', 'joe']
```
%% Cell type:code id: tags:
``` python
# # Actually request transcripts (takes a few minutes to run)
# transcripts = [url_to_transcript(u) for u in urls]
```
%% Cell type:code id: tags:
``` python
# # Pickle files for later use
# # Make a new directory to hold the text files
# !mkdir transcripts
# for i, c in enumerate(comedians):
#     with open("transcripts/" + c + ".txt", "wb") as file:
#         pickle.dump(transcripts[i], file)
```
%% Cell type:code id: tags:
``` python
# Load pickled files
data = {}
for i, c in enumerate(comedians):
    with open("transcripts/" + c + ".txt", "rb") as file:
        data[c] = pickle.load(file)
```
%% Cell type:code id: tags:
``` python
# Double check to make sure data has been loaded properly
data.keys()
```
%% Cell type:code id: tags:
``` python
# More checks
data['louis'][:2]
```
%% Cell type:markdown id: tags:
## Cleaning The Data
%% Cell type:markdown id: tags:
When dealing with numerical data, data cleaning often involves removing null values and duplicate data, dealing with outliers, etc. With text data, there are some common data cleaning techniques, which are also known as text pre-processing techniques.
With text data, this cleaning process can go on forever - there's always an exception to every cleaning step. So, we're going to follow the MVP (minimum viable product) approach: start simple and iterate. Here is a list of things you can do to clean your data. We're going to execute just the common cleaning steps here, and the rest can be done at a later point to improve our results. (A small illustrative sketch of a few of these steps follows the lists below.)
**Common data cleaning steps on all text:**
* Make text all lower case
* Remove punctuation
* Remove numerical values
* Remove common non-sensical text (\n)
* Tokenize text
* Remove stop words
**More data cleaning steps after tokenization:**
* Stemming / lemmatization
* Parts of speech tagging
* Create bi-grams or tri-grams
* Deal with typos
* And more...
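%% Cell type:markdown id: tags:
As a minimal illustrative sketch (not part of the original notebook), here is what a few of the common steps look like on a made-up string. The sample sentence and variable names are purely for demonstration; the real cleaning functions come later in this notebook.
%% Cell type:code id: tags:
``` python
# Illustration only: a handful of the cleaning steps above applied to a toy string
import re
import string
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS

sample = "Thank you! Thank you! [cheering] I still tell the same 3 jokes.\n"
sample = sample.lower()                                              # make text all lower case
sample = re.sub(r'\[.*?\]', '', sample)                              # remove text in square brackets
sample = re.sub('[%s]' % re.escape(string.punctuation), '', sample)  # remove punctuation
sample = re.sub(r'\w*\d\w*', '', sample)                             # remove words containing numbers
tokens = sample.split()                                              # naive whitespace tokenization
tokens = [t for t in tokens if t not in ENGLISH_STOP_WORDS]          # remove stop words
tokens
```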
%% Cell type:code id: tags:
``` python
# Let's take a look at our data again
next(iter(data.keys()))
```
%% Cell type:code id: tags:
``` python
# Notice that our dictionary is currently in key: comedian, value: list of text format
next(iter(data.values()))
```
%% Cell type:code id: tags:
``` python
# We are going to change this to key: comedian, value: string format
def combine_text(list_of_text):
    '''Takes a list of text and combines them into one large chunk of text.'''
    combined_text = ' '.join(list_of_text)
    return combined_text
```
%% Cell type:code id: tags:
``` python
# Combine it!
data_combined = {key: [combine_text(value)] for (key, value) in data.items()}
```
%% Cell type:code id: tags:
``` python
# We can either keep it in dictionary format or put it into a pandas dataframe
import pandas as pd
pd.set_option('max_colwidth',150)
data_df = pd.DataFrame.from_dict(data_combined).transpose()
data_df.columns = ['transcript']
data_df = data_df.sort_index()
data_df
```
%% Cell type:code id: tags:
``` python
# Let's take a look at the transcript for Ali Wong
data_df.transcript.loc['ali']
```
%% Cell type:code id: tags:
``` python
# Apply a first round of text cleaning techniques
import re
import string
def clean_text_round1(text):
    '''Make text lowercase, remove text in square brackets, remove punctuation and remove words containing numbers.'''
    text = text.lower()
    text = re.sub(r'\[.*?\]', '', text)
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'\w*\d\w*', '', text)
    return text
round1 = lambda x: clean_text_round1(x)
```
%% Cell type:code id: tags:
``` python
# Let's take a look at the updated text
data_clean = pd.DataFrame(data_df.transcript.apply(round1))
data_clean
```
%% Cell type:code id: tags:
``` python
# Apply a second round of cleaning
def clean_text_round2(text):
    '''Get rid of some additional punctuation and non-sensical text that was missed the first time around.'''
    text = re.sub('[‘’“”…]', '', text)
    text = re.sub('\n', '', text)
    return text
round2 = lambda x: clean_text_round2(x)
```
%% Cell type:code id: tags:
``` python
# Let's take a look at the updated text
data_clean = pd.DataFrame(data_clean.transcript.apply(round2))
data_clean
```
%% Cell type:markdown id: tags:
**NOTE:** This data cleaning (a.k.a. text pre-processing) step could go on for a while, but we are going to stop for now. After going through some analysis techniques, if you see that the results don't make sense or could be improved, you can come back and make more edits, such as the following (a rough sketch of the first two appears after this list):
* Mark 'cheering' and 'cheer' as the same word (stemming / lemmatization)
* Combine 'thank you' into one term (bi-grams)
* And a lot more...
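%% Cell type:markdown id: tags:
A hedged sketch (not from the original notebook) of what those first two edits could look like - stemming with NLTK's PorterStemmer and bi-grams via CountVectorizer's ngram_range. The example words and the assumption that NLTK is installed are mine, not the author's.
%% Cell type:code id: tags:
``` python
# Illustration only: stemming and bi-grams, two of the possible extra cleaning steps
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer

stemmer = PorterStemmer()
print(stemmer.stem('cheering'), stemmer.stem('cheer'))   # both reduce to 'cheer'

# ngram_range=(1, 2) counts single words AND two-word phrases,
# so 'thank you' can appear as a single term in the document-term matrix
cv_bigrams = CountVectorizer(ngram_range=(1, 2))
cv_bigrams.fit(["thank you thank you"])
print(cv_bigrams.get_feature_names())
```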
%% Cell type:markdown id: tags:
## Organizing The Data
%% Cell type:markdown id: tags:
I mentioned earlier that the output of this notebook will be clean, organized data in two standard text formats:
1. **Corpus** - a collection of text
2. **Document-Term Matrix** - word counts in matrix format
%% Cell type:markdown id: tags:
### Corpus
%% Cell type:markdown id: tags:
We already created a corpus in an earlier step. The definition of a corpus is a collection of texts, and they are all put together neatly in a pandas dataframe here.
%% Cell type:code id: tags:
``` python
# Let's take a look at our dataframe
data_df
```
%% Cell type:code id: tags:
``` python
# Let's add the comedians' full names as well
full_names = ['Ali Wong', 'Anthony Jeselnik', 'Bill Burr', 'Bo Burnham', 'Dave Chappelle', 'Hasan Minhaj',
'Jim Jefferies', 'Joe Rogan', 'John Mulaney', 'Louis C.K.', 'Mike Birbiglia', 'Ricky Gervais']
data_df['full_name'] = full_names
data_df
```
%% Cell type:code id: tags:
``` python
# Let's pickle it for later use
data_df.to_pickle("corpus.pkl")
```
%% Cell type:markdown id: tags:
### Document-Term Matrix
%% Cell type:markdown id: tags:
For many of the techniques we'll be using in future notebooks, the text must be tokenized, meaning broken down into smaller pieces. The most common tokenization technique is to break down text into words. We can do this using scikit-learn's CountVectorizer, where every row will represent a different document and every column will represent a different word.
In addition, with CountVectorizer, we can remove stop words. Stop words are common words that add no additional meaning to text such as 'a', 'the', etc.
%% Cell type:code id: tags:
``` python
# We are going to create a document-term matrix using CountVectorizer, and exclude common English stop words
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_clean.transcript)
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_clean.index
data_dtm
```
%% Cell type:code id: tags:
``` python
# Let's pickle it for later use
data_dtm.to_pickle("dtm.pkl")
```
%% Cell type:code id: tags:
``` python
# Let's also pickle the cleaned data (before we put it in document-term matrix format) and the CountVectorizer object
data_clean.to_pickle('data_clean.pkl')
pickle.dump(cv, open("cv.pkl", "wb"))
```
%% Cell type:markdown id: tags:
## Additional Exercises
%% Cell type:markdown id: tags:
1. Can you add an additional regular expression to the clean_text_round2 function to further clean the text?
2. Play around with CountVectorizer's parameters. What is ngram_range? What are min_df and max_df? (A hedged starting point is sketched below.)
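%% Cell type:markdown id: tags:
A hedged starting point for exercise 2 (not part of the original notebook) - the parameter values below are arbitrary examples, not recommendations:
%% Cell type:code id: tags:
``` python
# ngram_range=(1, 2): count unigrams and bigrams
# min_df=2: ignore terms that appear in fewer than 2 documents
# max_df=0.8: ignore terms that appear in more than 80% of documents
cv_tuned = CountVectorizer(stop_words='english', ngram_range=(1, 2), min_df=2, max_df=0.8)
data_cv_tuned = cv_tuned.fit_transform(data_clean.transcript)
data_dtm_tuned = pd.DataFrame(data_cv_tuned.toarray(), columns=cv_tuned.get_feature_names())
data_dtm_tuned.index = data_clean.index
data_dtm_tuned.shape
```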
%% Cell type:code id: tags:
``` python
```
%% Cell type:markdown id: tags:
### Hello World
%% Cell type:markdown id: tags:
Place your cursor in the cell below and press Shift-Enter. If you see the printed statement below the cell, you are good to go!
%% Cell type:code id: tags:
``` python
print('hello world')
```
%% Cell type:markdown id: tags:
# Exploratory Data Analysis
%% Cell type:markdown id: tags:
## Introduction
%% Cell type:markdown id: tags:
After the data cleaning step where we put our data into a few standard formats, the next step is to take a look at the data and see if what we're looking at makes sense. Before applying any fancy algorithms, it's always important to explore the data first.
When working with numerical data, some of the exploratory data analysis (EDA) techniques we can use include finding the average of the data set, the distribution of the data, the most common values, etc. The idea is the same when working with text data. We are going to find some more obvious patterns with EDA before identifying the hidden patterns with machine learning (ML) techniques. We are going to look at the following for each comedian:
1. **Most common words** - find these and create word clouds
2. **Size of vocabulary** - look at the number of unique words and also how quickly someone speaks (a quick sketch of the unique-word count follows this list)
3. **Amount of profanity** - most common terms
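%% Cell type:markdown id: tags:
As a rough sketch (not part of the original notebook), the size of each comedian's vocabulary can be read straight off the document-term matrix - a word counts toward a comedian's vocabulary if its count for that comedian is non-zero:
%% Cell type:code id: tags:
``` python
import pandas as pd

# Rows = terms, columns = comedians (same orientation as the transposed matrix used below)
data = pd.read_pickle('dtm.pkl').transpose()

# For each comedian, count how many terms have a non-zero count
unique_words = (data > 0).sum().sort_values(ascending=False)
unique_words
```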
%% Cell type:markdown id: tags:
## Most Common Words
%% Cell type:markdown id: tags:
### Analysis
%% Cell type:code id: tags:
``` python
# Read in the document-term matrix
import pandas as pd
data = pd.read_pickle('dtm.pkl')
data = data.transpose()
data.head()
```
%% Cell type:code id: tags:
``` python
# Find the top 30 words said by each comedian
top_dict = {}
for c in data.columns:
    top = data[c].sort_values(ascending=False).head(30)
    top_dict[c] = list(zip(top.index, top.values))
top_dict
```
%% Cell type:code id: tags:
``` python
# Print the top 15 words said by each comedian
for comedian, top_words in top_dict.items():
    print(comedian)
    print(', '.join([word for word, count in top_words[0:15]]))
    print('---')
```
%% Cell type:markdown id: tags:
**NOTE:** At this point, we could go on and create word clouds. However, by looking at these top words, you can see that some of them have very little meaning and could be added to a stop words list, so let's do just that.
%% Cell type:code id: tags:
``` python
# Look at the most common top words --> add them to the stop word list