from sumy.parsers.html import HtmlParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
import requests
import streamlit as st
st.title('News Summarizer')
# Gives option between top stories and search term
search_choice = st.sidebar.radio('', options=['Top Headlines', 'Search Term'])
# Takes user input for max number of sentences per summary
sentences_count = st.sidebar.slider('Max sentences per summary:', min_value=1,
                                    max_value=10)
def search_articles(sentences_count: int, **kwargs) -> list:
    """
    Sends GET request to News API /v2/everything endpoint,
    and summarizes data at each URL
    Inputs
    ----------
    sentences_count: specifies max number of sentences
        for return value
    kwargs: see News API /v2/everything query parameters
    Returns
    ----------
    list of article dicts, each with a 'summary' key added
    """
    # Query the /v2/everything endpoint; the response body carries an 'articles' list
    response = requests.get('https://newsapi.org/v2/everything', params=kwargs)
    articles = response.json().get('articles', [])
    return summarize_news_api(articles, sentences_count)
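Since kwargs are forwarded to the News API, a call presumably looks like the line below; 'q' and 'apiKey' are standard /v2/everything query parameters and the key value is a placeholder:

# Hypothetical call: 3-sentence summaries of articles matching the query
results = search_articles(3, q='automatic summarization', apiKey='<your News API key>')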
def summarize_news_api(articles: list, sentences_count: int) -> list:
    """
    Summarizes text at the URL for each element of the articles list
    (return value from news_api_request) and adds a new element to each
    article dict where the key is 'summary' and the value is
    the summarized text
    Inputs
    ----------
    articles: list of dict returned from news_api_request()
    sentences_count: max number of sentences per summary
    Returns
    ----------
    the same list of dicts, each with a 'summary' key added
    """
    for article in articles:
        # 'url' is a standard field in News API article objects
        article['summary'] = summarize_html(article['url'], sentences_count)
    return articles
# Example: summarize the Wikipedia article on automatic summarization
# in at most 10 sentences
url = 'https://en.wikipedia.org/wiki/Automatic_summarization'
summarize_html(url, 10)
import os
from app_functions import get_top_headlines, search_articles
import streamlit as st
API_KEY = os.environ['NEWS_API_KEY']
st.title('News Summarizer')
search_choice = st.sidebar.radio('', options=['Top Headlines', 'Search Term'])
sentences_count = st.sidebar.slider('Max sentences per summary:', min_value=1,
                                    max_value=10)
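The rest of streamlit_example.py is not shown; below is a minimal sketch of how the sidebar choice might drive the two imported functions, assuming both return a list of article dicts carrying 'title', 'url', and the added 'summary' key. The text_input widget and the News API parameters are illustrative assumptions, not from the original:

if search_choice == 'Top Headlines':
    # Assumes get_top_headlines mirrors search_articles' signature;
    # 'country' and 'apiKey' are standard /v2/top-headlines parameters
    articles = get_top_headlines(sentences_count, country='us', apiKey=API_KEY)
else:
    query = st.sidebar.text_input('Search term:')  # hypothetical search box
    articles = search_articles(sentences_count, q=query, apiKey=API_KEY) if query else []

for article in articles:
    st.subheader(article['title'])
    st.write(article['summary'])
    st.write(article['url'])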
from sumy.parsers.html import HtmlParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
import requests
def summarize_html(url: str, sentences_count: int, language: str = 'english') -> str:
    """
    Summarizes the text at url into at most sentences_count sentences
    """
    # Standard sumy pipeline: parse the page, stem words, drop stop words, run LSA
    parser = HtmlParser.from_url(url, Tokenizer(language))
    summarizer = Summarizer(Stemmer(language))
    summarizer.stop_words = get_stop_words(language)
    sentences = summarizer(parser.document, sentences_count)
    return ' '.join(str(sentence) for sentence in sentences)
streamlit run streamlit_example.py
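The imported third-party packages are sumy, requests, and streamlit, and streamlit_example.py reads the key from the NEWS_API_KEY environment variable; assuming a standard setup, something like the following needs to run before the streamlit command above:

pip install sumy requests streamlit
export NEWS_API_KEY=<your News API key>   # key obtained from newsapi.org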