This repository has been archived by the owner on Nov 27, 2020. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 105
/
linkedin_learning.py
284 lines (222 loc) · 11 KB
/
linkedin_learning.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
import asyncio
import aiohttp
import aiohttp.cookiejar
import lxml.html
import re
import os
import logging
from itertools import chain, filterfalse, starmap
from collections import namedtuple
from urllib.parse import urljoin
from config import USERNAME, PASSWORD, COURSES, PROXY, BASE_DOWNLOAD_PATH
# Verbose (DEBUG) logging by default so every download step is traced.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
# Caps the number of simultaneous video/zip downloads.
MAX_DOWNLOADS_SEMAPHORE = asyncio.Semaphore(10)
# Browser-like headers sent with every request; login() later adds a
# 'Csrf-Token' entry required by the learning-api endpoints.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36",
    "Accept": "*/*",
}
URL = "https://www.linkedin.com"
LOGIN_URL = f"{URL}/login"
FILE_TYPE_VIDEO = ".mp4"
FILE_TYPE_SUBTITLE = ".srt"
# Shared cookie jar: the authenticated session produced by login() is reused
# by every ClientSession created elsewhere in this module.
COOKIE_JAR = aiohttp.cookiejar.CookieJar()
# Sub-directory (under each course directory) for exercise archives.
EXERCISE_FOLDER_PATH = "exercises"
# Lightweight records mirroring the course structure returned by the API.
Course = namedtuple("Course", ["name", "slug", "description", "unlocked", "chapters", "exercises"])
Chapter = namedtuple("Chapter", ["name", "videos", "index"])
Video = namedtuple("Video", ["name", "slug", "index", "filename"])
Exercise = namedtuple("Exercise", ["name", "url", "course", "index"])
def sub_format_time(ms):
    """Convert a duration in milliseconds to an SRT timestamp (HH:MM:SS,mmm).

    Fix: the SubRip format requires the milliseconds field to be exactly
    three digits; the previous ':02' padding produced e.g. '00:00:00,05'
    for 5 ms, which many players reject or misparse.
    """
    seconds, milliseconds = divmod(ms, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f'{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}'
def clean_dir_name(dir_name):
    """Sanitize a title so it is safe to use as a file or directory name.

    Drops a leading numeric prefix (e.g. '1. A' -> 'A') and removes
    characters that are illegal in Windows paths (e.g. 'A: B' -> 'A B'),
    then trims surrounding whitespace.
    """
    cleaned = re.sub(r'^\d+\.', "", dir_name)
    cleaned = re.sub(r'[\\:<>"/|?*]', "", cleaned)
    return cleaned.strip()
def build_course(course_element: dict):
    """Convert one raw 'detailedCourses' API element into a Course record.

    Chapters/videos/exercises are numbered from 1; video filenames get a
    zero-padded index prefix and a sanitized title.
    """
    chapters = []
    for chapter_no, raw_chapter in enumerate(course_element['chapters'], start=1):
        videos = []
        for video_no, raw_video in enumerate(raw_chapter['videos'], start=1):
            prefix = str(video_no).zfill(2)
            videos.append(Video(
                name=raw_video['title'],
                slug=raw_video['slug'],
                index=video_no,
                filename=f"{prefix} - {clean_dir_name(raw_video['title'])}{FILE_TYPE_VIDEO}",
            ))
        chapters.append(Chapter(name=raw_chapter['title'], videos=videos, index=chapter_no))
    exercises = [
        Exercise(name=raw_exercise['name'],
                 url=raw_exercise['url'],
                 course=course_element['title'],
                 index=exercise_no)
        for exercise_no, raw_exercise in enumerate(course_element['exerciseFiles'], start=1)
    ]
    return Course(name=course_element['title'],
                  slug=course_element['slug'],
                  description=course_element['description'],
                  unlocked=course_element['fullCourseUnlocked'],
                  chapters=chapters,
                  exercises=exercises)
def chapter_dir(course: Course, chapter: Chapter):
    """Return the download directory for one chapter:
    <BASE_DOWNLOAD_PATH>/<course name>/<NN - chapter name>.
    """
    index_prefix = str(chapter.index).zfill(2)
    folder = f"{index_prefix} - {clean_dir_name(chapter.name)}"
    return os.path.join(BASE_DOWNLOAD_PATH, clean_dir_name(course.name), folder)
def exercises_dir(exercise: Exercise):
    """Return the download directory for a course's exercise archives:
    <BASE_DOWNLOAD_PATH>/<course name>/<EXERCISE_FOLDER_PATH>.
    """
    course_root = clean_dir_name(exercise.course)
    return os.path.join(BASE_DOWNLOAD_PATH, course_root, EXERCISE_FOLDER_PATH)
async def login(username, password):
    """Authenticate against linkedin.com and prime the shared session state.

    Two steps: GET the login page to harvest the CSRF token, then POST the
    credentials.  Side effects: populates the module-level COOKIE_JAR and
    stores the JSESSIONID cookie value into HEADERS['Csrf-Token'] for
    later learning-api calls.  Raises RuntimeError when no 'li_at'
    session cookie appears (credentials rejected).
    """
    async with aiohttp.ClientSession(headers=HEADERS, cookie_jar=COOKIE_JAR) as session:
        logging.info("[*] Login step 1 - Getting CSRF token...")
        resp = await session.get(LOGIN_URL, proxy=PROXY)
        body = await resp.text()
        # Looking for CSRF Token
        html = lxml.html.fromstring(body)
        # .pop() raises IndexError if the hidden input is missing from the page.
        csrf = html.xpath("//input[@name='loginCsrfParam']/@value").pop()
        logging.debug(f"[*] CSRF: {csrf}")
        data = {
            "session_key": username,
            "session_password": password,
            "loginCsrfParam": csrf,
            "isJsEnabled": False
        }
        logging.info("[*] Login step 1 - Done")
        logging.info("[*] Login step 2 - Logging In...")
        await session.post(urljoin(URL, 'uas/login-submit'), proxy=PROXY, data=data)
        # 'li_at' is LinkedIn's authenticated-session cookie; its absence
        # means the login was rejected.
        if not next((x.value for x in session.cookie_jar if x.key.lower() == 'li_at'), False):
            raise RuntimeError("[!] Could not login. Please check your credentials")
        # learning-api endpoints expect the JSESSIONID value as a CSRF header.
        HEADERS['Csrf-Token'] = next(x.value for x in session.cookie_jar if x.key.lower() == 'jsessionid')
        logging.info("[*] Login step 2 - Done")
async def fetch_courses():
    """Fetch every course slug listed in config.COURSES concurrently."""
    tasks = [fetch_course(slug) for slug in COURSES]
    return await asyncio.gather(*tasks)
async def fetch_course(course_slug):
    """Fetch metadata for one course slug, then download its videos and exercises.

    Fix: the API URL contained a doubled '?' ('detailedCourses??fields=...'),
    which turned the first query parameter's name into '?fields' and
    silently disabled the field selection.
    """
    url = f"{URL}/learning-api/detailedCourses?fields=fullCourseUnlocked,releasedOn,exerciseFileUrls,exerciseFiles&" \
          f"addParagraphsToTranscript=true&courseSlug={course_slug}&q=slugs"
    async with aiohttp.ClientSession(headers=HEADERS, cookie_jar=COOKIE_JAR) as session:
        resp = await session.get(url, proxy=PROXY, headers=HEADERS)
        data = await resp.json()
    course = build_course(data['elements'][0])
    logging.info(f'[*] Fetching course {course.name}')
    await fetch_chapters(course)
    await fetch_exercises(course)
    logging.info(f'[*] Finished fetching course "{course.name}"')
async def fetch_chapters(course: Course):
    """Create every chapter directory, then download all videos concurrently.

    Fix: uses ``os.makedirs(..., exist_ok=True)`` instead of an exists()
    pre-check followed by makedirs(), which was a check-then-create race
    (crashes if the directory appears in between).
    """
    for chapter in course.chapters:
        os.makedirs(chapter_dir(course, chapter), exist_ok=True)
    # One coroutine per video across all chapters, throttled by the semaphore.
    await asyncio.gather(*chain.from_iterable(fetch_chapter(course, chapter) for chapter in course.chapters))
async def fetch_exercises(course: Course):
    """Download all exercise archives for *course* (no-op when it has none).

    Fix: directory creation now uses ``os.makedirs(..., exist_ok=True)``,
    removing the check-then-create race of the previous exists() test.
    """
    if len(course.exercises) == 0:
        return
    # All exercises of a course share one directory; derive it from the first.
    os.makedirs(exercises_dir(course.exercises[0]), exist_ok=True)
    return await asyncio.gather(*map(fetch_zip_or_wait, course.exercises))
def fetch_chapter(course: Course, chapter: Chapter):
    """Lazily yield one download coroutine per video in *chapter*."""
    for clip in chapter.videos:
        yield fetch_video_or_wait(course, chapter, clip)
async def fetch_video_or_wait(course: Course, chapter: Chapter, video: Video):
    """Download one video, waiting for a free slot in the download semaphore."""
    # Throttle: at most MAX_DOWNLOADS_SEMAPHORE downloads run concurrently.
    async with MAX_DOWNLOADS_SEMAPHORE:
        await fetch_video(course, chapter, video)
async def fetch_zip_or_wait(exercise: Exercise):
    """Download one exercise archive, waiting for a free download slot."""
    # Shares the same concurrency cap as the video downloads.
    async with MAX_DOWNLOADS_SEMAPHORE:
        await fetch_zip(exercise)
async def fetch_video(course: Course, chapter: Chapter, video: Video):
    """Download one video and its subtitles, skipping files that already exist.

    Fixes over the previous version:
      * ``resp.raise_for_status()`` runs *before* ``resp.json()``, so an
        HTML error page is retried instead of crashing JSON parsing.
      * If all retries fail, ``data`` is still None; we now log and return
        instead of dereferencing it.
      * The transcript and the progressive URL are looked up independently.
        Previously one combined try/except meant a missing transcript also
        discarded the video URL, and a locked course (no 'url' key) caused
        the API JSON itself to be saved as the ``.mp4`` file.
    """
    subtitles_filename = os.path.splitext(video.filename)[0] + FILE_TYPE_SUBTITLE
    video_file_path = os.path.join(chapter_dir(course, chapter), video.filename)
    subtitle_file_path = os.path.join(chapter_dir(course, chapter), subtitles_filename)
    video_exists = os.path.exists(video_file_path)
    subtitle_exists = os.path.exists(subtitle_file_path)
    if video_exists and subtitle_exists:
        return
    logging.info(f"[~] Fetching course '{course.name}' Chapter no. {chapter.index} Video no. {video.index}")
    async with aiohttp.ClientSession(headers=HEADERS, cookie_jar=COOKIE_JAR) as session:
        api_url = f'{URL}/learning-api/detailedCourses?addParagraphsToTranscript=false&courseSlug={course.slug}&' \
                  f'q=slugs&resolution=_720&videoSlug={video.slug}'
        data = None
        tries = 3
        for _ in range(tries):
            try:
                resp = await session.get(api_url, proxy=PROXY, headers=HEADERS)
                resp.raise_for_status()
                data = await resp.json()
                break
            except aiohttp.client_exceptions.ClientResponseError:
                pass
        if data is None:
            logging.error(f"[!] Giving up on video no. {video.index} of '{course.name}' "
                          f"after {tries} failed attempts")
            return
        try:
            selected_video = data['elements'][0]['selectedVideo']
        except (KeyError, IndexError):
            logging.error(f"[!] Unexpected API response for video no. {video.index} of '{course.name}'")
            return
        subtitles = selected_video.get('transcript')
        # The 'url' key is absent when the course is locked for this account.
        download_url = selected_video.get('url', {}).get('progressiveUrl')
        duration_in_ms = int(selected_video['durationInSeconds']) * 1000
        if not video_exists:
            if download_url:
                logging.info(f"[~] Writing {video.filename}")
                await download_file(download_url, video_file_path)
            else:
                logging.warning(f"[!] No download URL for video no. {video.index} of "
                                f"'{course.name}' (course locked?)")
        if subtitles is not None:
            logging.info(f"[~] Writing {subtitles_filename}")
            subtitle_lines = subtitles['lines']
            await write_subtitles(subtitle_lines, subtitle_file_path, duration_in_ms)
    logging.info(f"[~] Done fetching course '{course.name}' Chapter no. {chapter.index} Video no. {video.index}")
async def fetch_zip(exercise: Exercise):
    """Download one exercise archive, skipping it if the file already exists."""
    target_name = f"{str(exercise.index).zfill(2)} - {exercise.name}"
    target_path = os.path.join(exercises_dir(exercise), target_name)
    if os.path.exists(target_path):
        return
    logging.info(f"[~] Fetching zip '{exercise.name}' Exercise no. {exercise.index}")
    await download_file(exercise.url, target_path)
    logging.info(f"[~] Done fetching zip '{exercise.name}' Exercise no. {exercise.index}")
async def write_subtitles(subs, output_path, video_duration):
    """Write transcript entries to *output_path* in SubRip (.srt) format.

    *subs* is a sequence of dicts with 'transcriptStartAt' (ms) and
    'caption'; *video_duration* (ms) supplies the end time of the last
    caption.
    """
    def subs_to_lines(idx, sub):
        # idx comes from enumerate(..., start=1), so sub == subs[idx - 1]
        # and subs[idx] is the NEXT caption: each caption ends where the
        # next begins, or at the end of the video for the final one.
        starts_at = sub['transcriptStartAt']
        ends_at = subs[idx]['transcriptStartAt'] if idx < len(subs) else video_duration
        caption = sub['caption']
        return f"{idx}\n" \
               f"{sub_format_time(starts_at)} --> {sub_format_time(ends_at)}\n" \
               f"{caption}\n\n"
    with open(output_path, 'wb') as f:
        for line in starmap(subs_to_lines, enumerate(subs, start=1)):
            f.write(line.encode('utf8'))
# One-hour overall timeout: large videos over slow links can take a while.
timeout = aiohttp.ClientTimeout(total=60 * 60)

async def download_file(url, output):
    """Stream *url* into the local file *output*.

    Best-effort: on any error the partial file is removed and the error is
    logged but not re-raised, so one failed download does not abort the
    whole course.  Improvement: reads in 64 KiB chunks instead of 1 KiB,
    cutting per-chunk loop and syscall overhead on multi-hundred-MB files.
    """
    async with aiohttp.ClientSession(headers=HEADERS, cookie_jar=COOKIE_JAR) as session:
        async with session.get(url, proxy=PROXY, headers=HEADERS, timeout=timeout) as r:
            try:
                with open(output, 'wb') as f:
                    while True:
                        chunk = await r.content.read(64 * 1024)
                        if not chunk:
                            break
                        f.write(chunk)
            except Exception as e:
                logging.exception(f"[!] Error while downloading: '{e}'")
                # Drop the partial file so a later run retries from scratch.
                if os.path.exists(output):
                    os.remove(output)
async def process():
    """Top-level workflow: authenticate, then fetch every configured course."""
    def banner(text):
        logging.info(f"[*] -------------{text}-------------")
    try:
        banner("Login")
        await login(USERNAME, PASSWORD)
        banner("Done")
        banner("Fetching Course")
        await fetch_courses()
        banner("Done")
    # Proxy errors first: ClientProxyConnectionError is a subclass of
    # ClientConnectionError, so clause order matters.
    except aiohttp.client_exceptions.ClientProxyConnectionError as err:
        logging.error(f"Proxy Error: {err}")
    except aiohttp.client_exceptions.ClientConnectionError as err:
        logging.error(f"Connection Error: {err}")
if __name__ == "__main__":
    # Drive the whole download workflow to completion on a fresh event loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(process())
    loop.close()