""" Importing the necessary python modules """
from nltk.tokenize import word_tokenize
from nltk import sent_tokenize
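
# Both tokenizers rely on NLTK's 'punkt' model; if it is not
# installed yet, download it once:
# import nltk; nltk.download('punkt')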

""" Input paragraph """
my_para = "This is first sentence. And this is second."

""" Splitting the paragraph into sentences"""
sentences = sent_tokenize(my_para)
print(sentences[0])
# Output: This is first sentence.
print(sentences[1])
# Output: And this is second.
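
""" sent_tokenize returns a plain Python list of strings, so the
usual list operations apply (a quick illustrative check) """
print(len(sentences))
# Output: 2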

""" Splitting the first sentence into words """
print(word_tokenize(sentences[0]))
# Output: ['This', 'is', 'first', 'sentence', '.']
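
""" Tokenizing every sentence in one pass (a minimal sketch; the
list comprehension and the name 'all_tokens' are illustrative) """
all_tokens = [word_tokenize(sentence) for sentence in sentences]
print(all_tokens)
# Output: [['This', 'is', 'first', 'sentence', '.'], ['And', 'this', 'is', 'second', '.']]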