from sklearn.feature_extraction.text import TfidfVectorizer

import date_selection
import sentence_selection
import summarization
def make_timeline(articles, gold_timeline, keywords):
    timeline = {}
    num_dates = len(gold_timeline)
    avg_num_sentences = sum(len(gold_timeline[date]) for date in gold_timeline) // num_dates

    # keep only the articles published within the gold timeline's range
    start_date = min(gold_timeline.keys())
    end_date = max(gold_timeline.keys())
    articles = [a for a in articles if start_date <= a['pub_date'] <= end_date]

    ranked_dates = date_selection.select_dates_by_mention_count(articles, start_date, end_date)

    # train the TF-IDF vectorizer on all sentences (not just the ones for a single date)
    all_sentences = [sentence['text'] for article in articles for sentence in article['sentences']]
    vectorizer = TfidfVectorizer(stop_words='english', lowercase=True)
    vectorizer.fit(all_sentences)

    for date in ranked_dates:
        if len(timeline) >= num_dates:
            break

        # select candidate sentences for this date
        candidate_sentences = sentence_selection.candidate_sentences(articles, date, vectorizer)
        if not candidate_sentences:
            continue

        # build a summary for this date
        summary_for_date = summarization.summarize(candidate_sentences, vectorizer, keywords,
                                                   num_sentences=avg_num_sentences)
        if not summary_for_date:
            continue

        timeline[date] = summary_for_date

    # sort the timeline by date
    timeline = {date: timeline[date] for date in sorted(timeline.keys())}
    return timeline
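
# A minimal usage sketch for make_timeline. The input shapes below are inferred
# from the code above and are assumptions, not part of this module: each article
# is a dict with a 'pub_date' (comparable to the gold timeline's date keys) and a
# list of 'sentences', each a dict with a 'text' field; the gold timeline maps
# dates to lists of reference sentences; keywords is a list of query terms.
#
# articles = [
#     {'pub_date': '2011-02-11',
#      'sentences': [{'text': 'The president resigned after weeks of protests.'}]},
# ]
# gold_timeline = {'2011-02-11': ['President resigns.']}
# timeline = make_timeline(articles, gold_timeline, keywords=['president', 'protest'])
# print_timeline(timeline)
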
def print_timeline(timeline, indent=4, start_indent=0, file=None):
    for date in sorted(timeline.keys()):
        sentences = timeline[date]