Last active
March 25, 2016 19:06
-
-
Save nicolehe/77996f26a2286fbf8dfd to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os | |
import random | |
import json | |
# Generates five fake retro "forum" ads by mashing together random words
# pulled from pairs of scraped JSON word-list files.

# Path of the folder that holds the scraped .json word-list files.
path = "/Users/nicolehe/Desktop/ITP/Spring 2016/Reading and Writing Electronic Text/midterm/"

# Loop-invariant ingredients, hoisted out of the loop (the original rebuilt
# them on every iteration).
domains = ['.com/', '.net/', '.org/', '.gov/', '.edu/', '.biz/']
intros = ["Please visit the ", "Surf on over to the ", "Be sure to check out the ", "Dont miss the ", "Take a look at the ", "Check out the ", "Hey, you should go to the "]
modBegs = ["lil", "xx", "~", "miss", "TheOriginal", "", "", ""]
modEnds = ["3000", "99", "Y2K", "420", "69", "baby", "~", "", "", "", "", "4eva", "XOXO"]

# Load the pool of first names once (one name per line).
with open("first-names.txt") as namesFile:
    namesList = [line.strip() for line in namesFile]


def findKey(data):
    # Return the first key in the JSON object that is not 'description' --
    # that key holds the useful word-list values. Returns None if every
    # key is 'description' (matches the original's implicit behavior).
    for key in data:
        if key != 'description':
            return key


for i in range(5):  # produce 5 ads
    # Pick two distinct JSON files from the folder.
    # BUG FIX: os.path.isfile(x) tested the bare filename against the
    # current working directory, not `path`; join with `path` so the
    # check matches where the files actually live.
    pages = random.sample(
        [x for x in os.listdir(path)
         if os.path.isfile(os.path.join(path, x)) and x.endswith(".json")],
        2)

    # Read both subjects' JSON. BUG FIX: the original opened the bare
    # filename (CWD-relative) and never closed the file handles; use the
    # full path and context managers.
    with open(os.path.join(path, pages[0])) as f:
        data1 = json.load(f)
    with open(os.path.join(path, pages[1])) as f:
        data2 = json.load(f)

    # Build display titles from filenames:
    # "some-topic.json" -> strip ".json", dashes to spaces, Title Case.
    titles = [page[:-5].replace("-", " ").title() for page in pages]

    # The values of the useful key in each JSON file.
    allWords1 = data1[findKey(data1)]
    allWords2 = data2[findKey(data2)]

    # Pick 4 words from each subject and slugify them (lowercase,
    # spaces to dashes) for use in the fake URL / moderator name.
    pickedWordsList1 = [w.lower().replace(" ", "-") for w in random.sample(allWords1, 4)]
    pickedWordsList2 = [w.lower().replace(" ", "-") for w in random.sample(allWords2, 4)]

    # "Hot topics" are printed verbatim, no editing needed.
    hotTopicsList1 = random.sample(allWords1, 3)
    hotTopicsList2 = random.sample(allWords2, 3)

    # Random picks for the different parts of the ad.
    domainPick = random.choice(domains)
    introPick = random.choice(intros)
    namePick = random.choice(namesList)
    modBegPick = random.choice(modBegs)
    modEndPick = random.choice(modEnds)

    subjects = " & ".join(titles)
    subjects = "".join(random.choice([k.upper(), k]) for k in subjects)  # sticky caps

    # Single-argument parenthesized print is valid in both Python 2 and 3.
    print("\n ~*~*~*~*~*~ " + introPick + "^_^ " + subjects + " ^_^ forum!!!" + "~*~*~*~*~*~ \n")
    print(" URL:")
    print(" http://" + pickedWordsList1[0] + "-" + pickedWordsList2[0] + domainPick + pickedWordsList1[1] + "-" + pickedWordsList2[1] + "/" + pickedWordsList2[2] + ".html")
    print(" MODERATOR: ")
    print(" " + modBegPick + pickedWordsList2[3] + pickedWordsList1[3] + modEndPick)
    print(" CONTACT: ")
    print(" " + namePick.lower() + "@" + pickedWordsList1[0] + "-" + pickedWordsList2[0] + domainPick)
    print(" POPULAR TOPICS: ")
    print(" " + hotTopicsList1[0] + ", " + hotTopicsList2[0] + ", " + hotTopicsList1[1] + ", " + hotTopicsList2[1] + "\n")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment