Commit e76d436b authored by vikrantRajan's avatar vikrantRajan

Updates

parent b1d598b1
@@ -2,7 +2,7 @@ import cv2
 # IMPORTING FACE DETECTION FILE & CONVERTING THE IMAGE TO GRAY BECAUSE DETECTION IS MORE ACCURATE ON GRAYSCALE
 face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
-img = cv2.imread("news.jpg")
+img = cv2.imread("photo.jpg")
 gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
 faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)
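For context, the sketch below shows what a minimal runnable version of this detector script might look like. The rectangle drawing and display calls are assumptions, not part of this commit; haarcascade_frontalface_default.xml and photo.jpg are expected alongside the script.

import cv2

# Load the Haar cascade shipped with OpenCV and the target image
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img = cv2.imread("photo.jpg")

# Detection runs on a grayscale copy of the image
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)

# Draw a green box around each detected face and show the result
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("Faces", img)
cv2.waitKey(0)
cv2.destroyAllWindows()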
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Bokeh Plot</title>
<script type="text/javascript" src="https://cdn.pydata.org/bokeh/release/bokeh-1.4.0.min.js"></script>
<script type="text/javascript">
Bokeh.set_log_level("info");
</script>
</head>
<body>
<div class="bk-root" id="5e9d78fb-9a2a-4b86-b20c-6ce21eca0ad0" data-root-id="1002"></div>
<script type="application/json" id="1283">
{"a8fc4c9f-f4f3-4377-a589-311aa64a644b":{"roots":{"references":[{"attributes":{"bottom_units":"screen","fill_alpha":{"value":0.5},"fill_color":{"value":"lightgrey"},"left_units":"screen","level":"overlay","line_alpha":{"value":1.0},"line_color":{"value":"black"},"line_dash":[4,4],"line_width":{"value":2},"render_mode":"css","right_units":"screen","top_units":"screen"},"id":"1048","type":"BoxAnnotation"},{"attributes":{"formatter":{"id":"1045","type":"BasicTickFormatter"},"minor_tick_line_color":{"value":null},"ticker":{"id":"1019","type":"BasicTicker"}},"id":"1018","type":"LinearAxis"},{"attributes":{"below":[{"id":"1013","type":"DatetimeAxis"}],"center":[{"id":"1017","type":"Grid"},{"id":"1022","type":"Grid"}],"left":[{"id":"1018","type":"LinearAxis"}],"plot_height":300,"plot_width":1000,"renderers":[{"id":"1041","type":"GlyphRenderer"}],"title":{"id":"1003","type":"Title"},"toolbar":{"id":"1029","type":"Toolbar"},"x_range":{"id":"1005","type":"DataRange1d"},"x_scale":{"id":"1009","type":"LinearScale"},"y_range":{"id":"1007","type":"DataRange1d"},"y_scale":{"id":"1011","type":"LinearScale"}},"id":"1002","subtype":"Figure","type":"Plot"},{"attributes":{"callback":null},"id":"1005","type":"DataRange1d"},{"attributes":{},"id":"1045","type":"BasicTickFormatter"},{"attributes":{"num_minor_ticks":5,"tickers":[{"id":"1049","type":"AdaptiveTicker"},{"id":"1050","type":"AdaptiveTicker"},{"id":"1051","type":"AdaptiveTicker"},{"id":"1052","type":"DaysTicker"},{"id":"1053","type":"DaysTicker"},{"id":"1054","type":"DaysTicker"},{"id":"1055","type":"DaysTicker"},{"id":"1056","type":"MonthsTicker"},{"id":"1057","type":"MonthsTicker"},{"id":"1058","type":"MonthsTicker"},{"id":"1059","type":"MonthsTicker"},{"id":"1060","type":"YearsTicker"}]},"id":"1014","type":"DatetimeTicker"},{"attributes":{"ticker":{"id":"1014","type":"DatetimeTicker"}},"id":"1017","type":"Grid"},{"attributes":{"desired_num_ticks":1},"id":"1019","type":"BasicTicker"},{"attributes":{"dimension":1,"ticker":{"id":"1019","type":"BasicTicker"}},"id":"1022","type":"Grid"},{"attributes":{},"id":"1011","type":"LinearScale"},{"attributes":{"mantissas":[1,2,5],"max_interval":500.0,"num_minor_ticks":0},"id":"1049","type":"AdaptiveTicker"},{"attributes":{"overlay":{"id":"1048","type":"BoxAnnotation"}},"id":"1025","type":"BoxZoomTool"},{"attributes":{"bottom":{"value":0},"fill_color":{"value":"green"},"left":{"field":"Start"},"line_color":{"value":"green"},"right":{"field":"End"},"top":{"value":1}},"id":"1039","type":"Quad"},{"attributes":{"base":60,"mantissas":[1,2,5,10,15,20,30],"max_interval":1800000.0,"min_interval":1000.0,"num_minor_ticks":0},"id":"1050","type":"AdaptiveTicker"},{"attributes":{"days":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]},"id":"1052","type":"DaysTicker"},{"attributes":{"formatter":{"id":"1047","type":"DatetimeTickFormatter"},"ticker":{"id":"1014","type":"DatetimeTicker"}},"id":"1013","type":"DatetimeAxis"},{"attributes":{},"id":"1028","type":"HelpTool"},{"attributes":{"base":24,"mantissas":[1,2,4,6,8,12],"max_interval":43200000.0,"min_interval":3600000.0,"num_minor_ticks":0},"id":"1051","type":"AdaptiveTicker"},{"attributes":{"active_drag":"auto","active_inspect":"auto","active_multi":null,"active_scroll":"auto","active_tap":"auto","tools":[{"id":"1023","type":"PanTool"},{"id":"1024","type":"WheelZoomTool"},{"id":"1025","type":"BoxZoomTool"},{"id":"1026","type":"SaveTool"},{"id":"1027","type":"ResetTool"},{"id":"1028","type":"HelpTool"},{"id":"1036","type":"HoverTool"}]},"id":"10
29","type":"Toolbar"},{"attributes":{},"id":"1009","type":"LinearScale"},{"attributes":{"months":[0,1,2,3,4,5,6,7,8,9,10,11]},"id":"1056","type":"MonthsTicker"},{"attributes":{},"id":"1023","type":"PanTool"},{"attributes":{"months":[0,2,4,6,8,10]},"id":"1057","type":"MonthsTicker"},{"attributes":{"callback":null},"id":"1007","type":"DataRange1d"},{"attributes":{},"id":"1024","type":"WheelZoomTool"},{"attributes":{"days":[1,4,7,10,13,16,19,22,25,28]},"id":"1053","type":"DaysTicker"},{"attributes":{"days":[1,15]},"id":"1055","type":"DaysTicker"},{"attributes":{"callback":null,"tooltips":[["Start","@Start_string"],["End","@End_string"]]},"id":"1036","type":"HoverTool"},{"attributes":{},"id":"1026","type":"SaveTool"},{"attributes":{"months":[0,6]},"id":"1059","type":"MonthsTicker"},{"attributes":{},"id":"1027","type":"ResetTool"},{"attributes":{"days":[1,8,15,22]},"id":"1054","type":"DaysTicker"},{"attributes":{},"id":"1060","type":"YearsTicker"},{"attributes":{},"id":"1047","type":"DatetimeTickFormatter"},{"attributes":{"source":{"id":"1001","type":"ColumnDataSource"}},"id":"1042","type":"CDSView"},{"attributes":{"months":[0,4,8]},"id":"1058","type":"MonthsTicker"},{"attributes":{"bottom":{"value":0},"fill_alpha":{"value":0.1},"fill_color":{"value":"#1f77b4"},"left":{"field":"Start"},"line_alpha":{"value":0.1},"line_color":{"value":"#1f77b4"},"right":{"field":"End"},"top":{"value":1}},"id":"1040","type":"Quad"},{"attributes":{"data_source":{"id":"1001","type":"ColumnDataSource"},"glyph":{"id":"1039","type":"Quad"},"hover_glyph":null,"muted_glyph":null,"nonselection_glyph":{"id":"1040","type":"Quad"},"selection_glyph":null,"view":{"id":"1042","type":"CDSView"}},"id":"1041","type":"GlyphRenderer"},{"attributes":{},"id":"1062","type":"UnionRenderers"},{"attributes":{},"id":"1061","type":"Selection"},{"attributes":{"callback":null,"data":{"End":{"__ndarray__":"OVTKtcQAd0I=","dtype":"float64","shape":[1]},"End_string":["2020-02-03 18:23:50"],"Start":{"__ndarray__":"GV6CrcQAd0I=","dtype":"float64","shape":[1]},"Start_string":["2020-02-03 18:23:16"],"index":[0]},"selected":{"id":"1061","type":"Selection"},"selection_policy":{"id":"1062","type":"UnionRenderers"}},"id":"1001","type":"ColumnDataSource"},{"attributes":{"text":"Motion Graph"},"id":"1003","type":"Title"}],"root_ids":["1002"]},"title":"Bokeh Application","version":"1.4.0"}}
</script>
<script type="text/javascript">
(function() {
var fn = function() {
Bokeh.safely(function() {
(function(root) {
function embed_document(root) {
var docs_json = document.getElementById('1283').textContent;
var render_items = [{"docid":"a8fc4c9f-f4f3-4377-a589-311aa64a644b","roots":{"1002":"5e9d78fb-9a2a-4b86-b20c-6ce21eca0ad0"}}];
root.Bokeh.embed.embed_items(docs_json, render_items);
}
if (root.Bokeh !== undefined) {
embed_document(root);
} else {
var attempts = 0;
var timer = setInterval(function(root) {
if (root.Bokeh !== undefined) {
clearInterval(timer);
embed_document(root);
} else {
attempts++;
if (attempts > 100) {
clearInterval(timer);
console.log("Bokeh: ERROR: Unable to run BokehJS code because BokehJS library is missing");
}
}
}, 10, root)
}
})(window);
});
};
if (document.readyState != "loading") fn();
else document.addEventListener("DOMContentLoaded", fn);
})();
</script>
</body>
</html>
\ No newline at end of file
,Start,End
0,2019-12-24 19:35:55.136794,2019-12-24 19:35:58.152620
1,2019-12-24 19:36:08.084905,2019-12-24 19:36:14.872890
2,2019-12-24 19:36:17.911238,2019-12-24 19:36:20.188220
3,2019-12-27 12:34:18.106721,2019-12-27 12:34:21.697614
4,2020-02-03 18:23:16.517881,2020-02-03 18:23:50.437264
@@ -17,5 +17,5 @@ p.add_tools(hover)
 q = p.quad(left="Start", right = "End", bottom = 0, top = 1, color = "green", source = cds)
-output_file("Graph1.html")
+output_file("Graph2.html")
 show(p)
\ No newline at end of file
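For reference, here is a minimal sketch of what the full plotting script around this hunk might look like, written against the Bokeh 1.4 API used by the generated HTML above. The Times.csv file name and the datetime parsing are assumptions; the figure size, title, and hover tooltips are taken from the embedded plot JSON.

import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.models import HoverTool, ColumnDataSource

# Load the motion intervals recorded by the detector (file name assumed)
df = pd.read_csv("Times.csv", index_col=0, parse_dates=["Start", "End"])
df["Start_string"] = df["Start"].dt.strftime("%Y-%m-%d %H:%M:%S")
df["End_string"] = df["End"].dt.strftime("%Y-%m-%d %H:%M:%S")
cds = ColumnDataSource(df)

# One green quad per motion interval on a datetime x-axis
p = figure(x_axis_type="datetime", plot_height=300, plot_width=1000, title="Motion Graph")
hover = HoverTool(tooltips=[("Start", "@Start_string"), ("End", "@End_string")])
p.add_tools(hover)
q = p.quad(left="Start", right="End", bottom=0, top=1, color="green", source=cds)

output_file("Graph2.html")
show(p)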
,Start,End
0,2019-12-24 19:35:55.136794,2019-12-24 19:35:58.152620
1,2019-12-24 19:36:08.084905,2019-12-24 19:36:14.872890
2,2019-12-24 19:36:17.911238,2019-12-24 19:36:20.188220
3,2020-02-03 18:27:13.813472,2020-02-03 18:27:30.652769
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data Cleaning"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook goes through a necessary step of any data science project - data cleaning. Data cleaning is a time consuming and unenjoyable task, yet it's a very important one. Keep in mind, \"garbage in, garbage out\". Feeding dirty data into a model will give us results that are meaningless.\n",
"\n",
"Specifically, we'll be walking through:\n",
"\n",
"1. **Getting the data - **in this case, we'll be scraping data from a website\n",
"2. **Cleaning the data - **we will walk through popular text pre-processing techniques\n",
"3. **Organizing the data - **we will organize the cleaned data into a way that is easy to input into other algorithms\n",
"\n",
"The output of this notebook will be clean, organized data in two standard text formats:\n",
"\n",
"1. **Corpus** - a collection of text\n",
"2. **Document-Term Matrix** - word counts in matrix format"
]
},
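{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick preview of the second format, here is a minimal sketch of how a corpus becomes a document-term matrix using scikit-learn's `CountVectorizer`. The two toy documents are made up for illustration; the real corpus is assembled later in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: turn a tiny corpus into a document-term matrix\n",
"from sklearn.feature_extraction.text import CountVectorizer\n",
"import pandas as pd\n",
"\n",
"toy_corpus = ['the cat sat down', 'the dog sat']  # made-up example documents\n",
"cv = CountVectorizer(stop_words='english')\n",
"X = cv.fit_transform(toy_corpus)\n",
"\n",
"# Rows = documents, columns = words, values = counts\n",
"# (on older scikit-learn, use cv.get_feature_names() instead)\n",
"pd.DataFrame(X.toarray(), columns=cv.get_feature_names_out())"
]
},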
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Problem Statement"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a reminder, our goal is to look at transcripts of various comedians and note their similarities and differences. Specifically, I'd like to know if Ali Wong's comedy style is different than other comedians, since she's the comedian that got me interested in stand up comedy."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Getting The Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Luckily, there are wonderful people online that keep track of stand up routine transcripts. [Scraps From The Loft](http://scrapsfromtheloft.com) makes them available for non-profit and educational purposes.\n",
"\n",
"To decide which comedians to look into, I went on IMDB and looked specifically at comedy specials that were released in the past 5 years. To narrow it down further, I looked only at those with greater than a 7.5/10 rating and more than 2000 votes. If a comedian had multiple specials that fit those requirements, I would pick the most highly rated one. I ended up with a dozen comedy specials."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Web scraping, pickle imports\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"import pickle\n",
"\n",
"# Scrapes transcript data from scrapsfromtheloft.com\n",
"def url_to_transcript(url):\n",
" '''Returns transcript data specifically from scrapsfromtheloft.com.'''\n",
" page = requests.get(url).text\n",
" soup = BeautifulSoup(page, \"lxml\")\n",
" text = [p.text for p in soup.find(class_=\"post-content\").find_all('p')]\n",
" print(url)\n",
" return text\n",
"\n",
"# URLs of transcripts in scope\n",
"urls = ['http://scrapsfromtheloft.com/2017/05/06/louis-ck-oh-my-god-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/04/11/dave-chappelle-age-spin-2017-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2018/03/15/ricky-gervais-humanity-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/08/07/bo-burnham-2013-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/05/24/bill-burr-im-sorry-feel-way-2014-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/04/21/jim-jefferies-bare-2014-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/08/02/john-mulaney-comeback-kid-2015-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/10/21/hasan-minhaj-homecoming-king-2017-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/09/19/ali-wong-baby-cobra-2016-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/08/03/anthony-jeselnik-thoughts-prayers-2015-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2018/03/03/mike-birbiglia-my-girlfriends-boyfriend-2013-full-transcript/',\n",
" 'http://scrapsfromtheloft.com/2017/08/19/joe-rogan-triggered-2016-full-transcript/']\n",
"\n",
"# Comedian names\n",
"comedians = ['louis', 'dave', 'ricky', 'bo', 'bill', 'jim', 'john', 'hasan', 'ali', 'anthony', 'mike', 'joe']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Actually request transcripts (takes a few minutes to run)\n",
"# transcripts = [url_to_transcript(u) for u in urls]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# # Pickle files for later use\n",
"\n",
"# # Make a new directory to hold the text files\n",
"# !mkdir transcripts\n",
"\n",
"# for i, c in enumerate(comedians):\n",
"# with open(\"transcripts/\" + c + \".txt\", \"wb\") as file:\n",
"# pickle.dump(transcripts[i], file)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Load pickled files\n",
"data = {}\n",
"for i, c in enumerate(comedians):\n",
" with open(\"transcripts/\" + c + \".txt\", \"rb\") as file:\n",
" data[c] = pickle.load(file)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Double check to make sure data has been loaded properly\n",
"data.keys()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# More checks\n",
"data['louis'][:2]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Cleaning The Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When dealing with numerical data, data cleaning often involves removing null values and duplicate data, dealing with outliers, etc. With text data, there are some common data cleaning techniques, which are also known as text pre-processing techniques.\n",
"\n",
"With text data, this cleaning process can go on forever. There's always an exception to every cleaning step. So, we're going to follow the MVP (minimum viable product) approach - start simple and iterate. Here are a bunch of things you can do to clean your data. We're going to execute just the common cleaning steps here and the rest can be done at a later point to improve our results.\n",
"\n",
"**Common data cleaning steps on all text:**\n",
"* Make text all lower case\n",
"* Remove punctuation\n",
"* Remove numerical values\n",
"* Remove common non-sensical text (/n)\n",
"* Tokenize text\n",
"* Remove stop words\n",
"\n",
"**More data cleaning steps after tokenization:**\n",
"* Stemming / lemmatization\n",
"* Parts of speech tagging\n",
"* Create bi-grams or tri-grams\n",
"* Deal with typos\n",
"* And more..."
]
},
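{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before the regex rounds below, here is a hedged sketch of the tokenization and stop word steps, using NLTK as one common option. NLTK is an assumption here - it is not used elsewhere in this notebook - and the example sentence is made up."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: tokenize text and drop stop words with NLTK\n",
"# import nltk; nltk.download('punkt'); nltk.download('stopwords')  # first run only\n",
"from nltk.tokenize import word_tokenize\n",
"from nltk.corpus import stopwords\n",
"\n",
"tokens = word_tokenize('this is a tiny made up example sentence')\n",
"stop_words = set(stopwords.words('english'))\n",
"[t for t in tokens if t not in stop_words]"
]
},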
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Let's take a look at our data again\n",
"next(iter(data.keys()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Notice that our dictionary is currently in key: comedian, value: list of text format\n",
"next(iter(data.values()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# We are going to change this to key: comedian, value: string format\n",
"def combine_text(list_of_text):\n",
" '''Takes a list of text and combines them into one large chunk of text.'''\n",
" combined_text = ' '.join(list_of_text)\n",
" return combined_text"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Combine it!\n",
"data_combined = {key: [combine_text(value)] for (key, value) in data.items()}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# We can either keep it in dictionary format or put it into a pandas dataframe\n",
"import pandas as pd\n",
"pd.set_option('max_colwidth',150)\n",
"\n",
"data_df = pd.DataFrame.from_dict(data_combined).transpose()\n",
"data_df.columns = ['transcript']\n",
"data_df = data_df.sort_index()\n",
"data_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Let's take a look at the transcript for Ali Wong\n",
"data_df.transcript.loc['ali']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Apply a first round of text cleaning techniques\n",
"import re\n",
"import string\n",
"\n",
"def clean_text_round1(text):\n",
" '''Make text lowercase, remove text in square brackets, remove punctuation and remove words containing numbers.'''\n",
" text = text.lower()\n",
" text = re.sub('\\[.*?\\]', '', text)\n",
" text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n",
" text = re.sub('\\w*\\d\\w*', '', text)\n",
" return text\n",
"\n",
"round1 = lambda x: clean_text_round1(x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Let's take a look at the updated text\n",
"data_clean = pd.DataFrame(data_df.transcript.apply(round1))\n",
"data_clean"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Apply a second round of cleaning\n",
"def clean_text_round2(text):\n",
" '''Get rid of some additional punctuation and non-sensical text that was missed the first time around.'''\n",
" text = re.sub('[‘’“”…]', '', text)\n",
" text = re.sub('\\n', '', text)\n",
" return text\n",
"\n",
"round2 = lambda x: clean_text_round2(x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Let's take a look at the updated text\n",
"data_clean = pd.DataFrame(data_clean.transcript.apply(round2))\n",
"data_clean"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**NOTE:** This data cleaning aka text pre-processing step could go on for a while, but we are going to stop for now. After going through some analysis techniques, if you see that the results don't make sense or could be improved, you can come back and make more edits such as:\n",
"* Mark 'cheering' and 'cheer' as the same word (stemming / lemmatization)\n",
"* Combine 'thank you' into one term (bi-grams)\n",
"* And a lot more..."
]
},
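{
"cell_type": "markdown",
"metadata": {},
"source": [
"For when those edits become necessary, here is a hedged sketch of two of them: lemmatization with NLTK's WordNetLemmatizer and bi-gram extraction with scikit-learn's `CountVectorizer`. Neither is applied to `data_clean` here; both snippets are standalone illustrations on made-up inputs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: possible future cleaning steps\n",
"# import nltk; nltk.download('wordnet')  # first run only\n",
"from nltk.stem import WordNetLemmatizer\n",
"from sklearn.feature_extraction.text import CountVectorizer\n",
"\n",
"# 'cheering' and 'cheer' map to the same verb lemma\n",
"lemmatizer = WordNetLemmatizer()\n",
"print(lemmatizer.lemmatize('cheering', pos='v'))\n",
"\n",
"# ngram_range=(2, 2) counts adjacent word pairs such as 'thank you'\n",
"cv = CountVectorizer(ngram_range=(2, 2))\n",
"cv.fit(['thank you very much'])\n",
"print(cv.get_feature_names_out())"
]
}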
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Hello World"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"# Exploratory Data Analysis"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the data cleaning step where we put our data into a few standard formats, the next step is to take a look at the data and see if what we're looking at makes sense. Before applying any fancy algorithms, it's always important to explore the data first.\n",
"\n",
"When working with numerical data, some of the exploratory data analysis (EDA) techniques we can use include finding the average of the data set, the distribution of the data, the most common values, etc. The idea is the same when working with text data. We are going to find some more obvious patterns with EDA before identifying the hidden patterns with machines learning (ML) techniques. We are going to look at the following for each comedian:\n",
"\n",
"1. **Most common words** - find these and create word clouds\n",
"2. **Size of vocabulary** - look number of unique words and also how quickly someone speaks\n",
"3. **Amount of profanity** - most common terms"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Most Common Words"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Read in the document-term matrix\n",
"import pandas as pd\n",
"\n",
"data = pd.read_pickle('dtm.pkl')\n",
"data = data.transpose()\n",
"data.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Find the top 30 words said by each comedian\n",
"top_dict = {}\n",
"for c in data.columns:\n",
" top = data[c].sort_values(ascending=False).head(30)\n",
" top_dict[c]= list(zip(top.index, top.values))\n",
"\n",
"top_dict"
]
},
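{
"cell_type": "markdown",
"metadata": {},
"source": [
"The word clouds promised above can be built from these counts. Here is a minimal sketch using the `wordcloud` package (an assumption - any plotting route works), shown for a single comedian."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: a word cloud for one comedian from the document-term matrix counts\n",
"from wordcloud import WordCloud\n",
"import matplotlib.pyplot as plt\n",
"\n",
"wc = WordCloud(background_color='white', max_words=50)\n",
"wc.generate_from_frequencies(data['ali'].to_dict())\n",
"\n",
"plt.imshow(wc, interpolation='bilinear')\n",
"plt.axis('off')\n",
"plt.show()"
]
},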
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],