
Commit ca3b927

adding basic code files for project and capability demos
1 parent af42b2a commit ca3b927

File tree

18 files changed: +1122 -0 lines changed


gradetracker/GradeTracker.py

Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
'''
grade tracking program - think through the goal up front: what is the task and design?
needs to enable several basic functions for teachers
needs to have a login to protect the student data
'''
#import libraries first
import statistics as s

#add constants next
admins = {'Faculty1':'ABC123','Faculty2':'ABC123'}

#Like admins above, this is a dictionary, but of students. Dictionaries use curly brackets with colons to associate keys with values. In this case, each student's first name is a key. The values are lists of grades.
#Lists are denoted with square brackets. Values inside are indexed starting with 0 for the first one. Each value is separated by a comma.
students = {'Alex':[87,88,98],
            'Sally':[88,67,93],
            'Nboke':[90,88,78]}

#Now we define functions. Functions encapsulate logic into reusable recipes that can be executed whenever we need them by calling their name with parentheses.
def enterGrades():
    nameToEnter = input('Student name: ')
    gradeToEnter = input('Grade: ')
    #This checks the keys of the students dictionary to see if the name entered exactly matches one of them.
    if nameToEnter in students:
        print('Adding grade for '+nameToEnter)
        students[nameToEnter].append(float(gradeToEnter)) #float will have a .0
        print(str(nameToEnter)+' now has these grades:')
        print(students[nameToEnter])
    else:
        print('Student not found. Please check your spelling or go back and add if new.')

def removeStudent():
    nameToRemove = input('Who do you want to remove? ')
    if nameToRemove in students:
        print('Removing '+nameToRemove)
        del students[nameToRemove]
        print(students)
    else:
        print('Student not found.')

def averageStudents():
    for student in students:
        grades = students[student]
        average = s.mean(grades) #notice the s? we imported the statistics module as s, so we are calling the function mean() from the statistics module.
        print(student,' average ',average)

def main():
    print("User: " + login)
    #Here we present our main menu options once a person logs in successfully.
    print("""
    Welcome to the Grade Tracker

    [1] - Enter Grades
    [2] - Remove Student
    [3] - Student Averages
    [4] - Exit
    """)

    action = input('What would you like to do? (Enter a number) ')
    #Here we process their choice of what they want to do.
    if action == '1':
        #print('1 selected')
        enterGrades()
    elif action == '2':
        #print('2 selected')
        removeStudent()
    elif action == '3':
        #print('3 selected')
        averageStudents()
    elif action == '4':
        #print('4 selected')
        exit()
    else:
        print('Valid option not selected.') #the while loop below will reprompt

login = input('User: ')

if login in admins:
    password = input('Password: ')
    if admins[login] == password:
        print('Welcome,',login)
        #now run the code
        while True:
            main()
    else:
        print('Invalid password.')
else:
    print('Invalid user.')
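
The comments above hinge on two ideas: a dictionary whose values are lists of grades, and statistics.mean() for the averages. A minimal standalone sketch of just that pattern, using hypothetical sample data and skipping the login and menu, might look like this:

import statistics as s

#a dictionary of students: each key is a name, each value is a list of grades (hypothetical sample data)
students = {'Alex': [87, 88, 98], 'Sally': [88, 67, 93]}

#appending to the list stored under a key adds a new grade for that student, as enterGrades() does
students['Alex'].append(91.0)

#statistics.mean() averages the numbers in each list, as averageStudents() does
for name, grades in students.items():
    print(name, 'average', s.mean(grades))

Running it prints one average per student, the same output averageStudents() produces inside the menu loop.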

ksu scrape/cms_scrape.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
'''
To use this code, you will first need to install the third-party packages imported below (beautifulsoup4 and requests), plus the lxml parser, using pip.
'''
from bs4 import BeautifulSoup
import requests
import csv
source = requests.get('http://coreyms.com').text
soup = BeautifulSoup(source, 'lxml')
#print(soup.prettify())
#article = soup.find('article')
#method to get all
articles = soup.find_all('article')
#print(article.prettify())
for article in articles:
    headline = article.h2.a.text
    print(headline)
    summary = article.find('div', class_='entry-content').p.text
    print(summary)
    #This try/except block handles the situation when a video is not embedded in the post.
    try:
        vid_src = article.find('iframe', class_='youtube-player')['src']
        #print(vid_src)
        vid_id = vid_src.split('/')[4]
        vid_id = vid_id.split('?')[0]
        #print(vid_id)
        #the f-string below is only available in Python 3.6 and later
        yt_link = f'https://youtube.com/watch?v={vid_id}'
    except Exception:
        yt_link = None
    print(yt_link)
    print()
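
The two moves that carry this script are find_all() to loop over every article and a try/except to tolerate posts without an embedded video. A minimal offline sketch of the same pattern, run against a made-up HTML snippet instead of a live request, might look like this:

from bs4 import BeautifulSoup

#made-up HTML standing in for the real page: the second article has no iframe
html = '''
<article><h2><a>First post</a></h2>
<iframe class="youtube-player" src="https://www.youtube.com/embed/abc123?version=3"></iframe></article>
<article><h2><a>Second post</a></h2></article>
'''
soup = BeautifulSoup(html, 'html.parser') #built-in parser, so lxml is not required for this sketch

for article in soup.find_all('article'): #find_all() returns every match; find() returns only the first
    print(article.h2.a.text)
    try:
        vid_src = article.find('iframe', class_='youtube-player')['src']
        vid_id = vid_src.split('/')[4].split('?')[0]
        print(f'https://youtube.com/watch?v={vid_id}')
    except Exception:
        print(None) #find() returned None because there is no iframe, so indexing it raised an error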

ksu scrape/ksu_scrape.py

Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
'''
To use this code, you will first need to install the third-party packages imported below (beautifulsoup4 and requests), plus the lxml parser, using pip or a manual install method.
'''
from bs4 import BeautifulSoup
import requests
import csv
from datetime import datetime


source = requests.get('http://news.kennesaw.edu/in-the-news/?&categories=in%20the%20news&year=2018').text

soup = BeautifulSoup(source, 'lxml')

ksu_news_csv = open("ksu_news "+"{:%B %d, %Y}".format(datetime.now())+".csv","w",newline="")
csv_writer = csv.writer(ksu_news_csv)

csv_writer.writerow(["Number","Title","Source","URL","Date"])

#print(soup.prettify())

#blog_post = soup.find('ul',class_='blog_listing')
blog_posts = soup.find('ul',class_='blog_listing')

blog_posts = blog_posts.find_all('li')

#print(type(blog_posts))
#blog_posts = blog_posts.split("<li>")
#print(blog_posts.prettify())

i = 1
for blog_post in blog_posts:

    #print(i)
    title = blog_post.a.text
    title = title.split("(")
    justtitle = title[0]
    #print(title[0])
    if len(title)>1:
        source = title[1].strip(")")
        #print(source)
    else:
        source = "No Source"
        #print(source)

    URL = blog_post.find('a')['href']
    #print(URL)

    date = blog_post.find("span").text
    date = date.strip()
    date = date.strip("–")
    date = date.strip()
    #print(date)
    csv_writer.writerow([i,justtitle,source,URL,date])

    i += 1
    print()

ksu_news_csv.close()
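
Most of the work inside the loop is plain string handling: split("(") separates a title of the form "Headline (Source)" into its two parts, and repeated strip() calls clean up the date text. A small standalone sketch of that logic on hypothetical strings:

#hypothetical title text as it might appear in a listing item
title = 'KSU researchers win grant (Atlanta Journal-Constitution)'.split('(')
justtitle = title[0] #'KSU researchers win grant '
source = title[1].strip(')') if len(title) > 1 else 'No Source'
print(justtitle, '|', source)

#hypothetical date text with surrounding whitespace and a leading dash
date = '  – January 15, 2018  '
date = date.strip().strip('–').strip() #peel off spaces, then the dash, then spaces again
print(date)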
