Urban Planning Lecture Notes PDF Analyzer
import re
from collections import Counter
from typing import Dict, List

import PyPDF2
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize


class UrbanPlanningNotesAnalyzer:
    def __init__(self, pdf_path: str):
        self.pdf_path = pdf_path
        self.full_text = ""
        self.pages_text = []
        self.sections = {}
        self.key_concepts = []
        self.case_studies = []

    def identify_sections(self) -> Dict[str, str]:
        """Identify and extract major sections from lecture notes"""
        lines = self.full_text.split('\n')
        current_section = "Introduction"
        sections = {current_section: []}

        # Common urban planning section headers
        section_patterns = [
            r'(?i)^(?:chapter|section|part)\s+\d+[:.\s]+(.+)$',
            r'(?i)^(\d+\.\d+)\s+(.+)$',
            r'^([A-Z][A-Z\s]{5,})$',  # ALL CAPS headers
            r'(?i)^(introduction|background|methodology|analysis|conclusion|references)$',
            r'(?i)^(zoning|transportation|land use|environmental|housing|infrastructure|sustainability)',
            r'(?i)^(smart growth|new urbanism|urban design|public participation|economic development)'
        ]

        for line in lines:
            line = line.strip()
            if not line:
                continue

            section_found = False
            for pattern in section_patterns:
                if re.match(pattern, line):
                    current_section = line[:50]  # Limit section name length
                    sections[current_section] = []
                    section_found = True
                    break

            if not section_found and current_section:
                sections[current_section].append(line)

        # Convert lists to strings
        self.sections = {k: ' '.join(v) for k, v in sections.items() if v}
        return self.sections
    def _extract_principles(self) -> List[str]:
        """Extract core urban planning principles"""
        principle_patterns = [
            r'(?i)principle[s]? of (.+?)[\.\n]',
            r'(?i)core (?:concept|principle)[s]?: (.+?)[\.\n]',
            r'(?i)([^.]*?(?:should|must|requires|essential|crucial|important)[^.]*?\.)'
        ]

        principles = []
        for pattern in principle_patterns:
            matches = re.findall(pattern, self.full_text)
            principles.extend(matches[:5])

        return principles[:10]
    # Part of an interactive study-session helper class (not shown in this
    # excerpt) that stores an UrbanPlanningNotesAnalyzer in `self.analyzer`.
    def _take_quiz(self):
        questions = self.analyzer.generate_study_questions()[:5]
        score = 0
        print("\n📝 QUICK QUIZ (5 questions)")
        print("Answer in your own words, then press Enter for sample answer\n")

        for i, q in enumerate(questions, 1):
            print(f"\n{i}. {q['question']}")
            input("Press Enter to see sample answer...")
            print(f"\n   Sample approach: {q['hint']}")
            print("   Review the relevant section for complete answer.\n")


def main():
    # Replace with your PDF path
    pdf_path = "urban_planning_lecture_notes.pdf"
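main() is cut off in this excerpt right after the pdf_path assignment. Below is a minimal driver sketch, assuming the methods are meant to be called in the order extract, analyze, then summarize; the run_analysis name and the printed fields are my own illustration, not the original main().

def run_analysis(pdf_path: str) -> None:
    # Hypothetical driver; the body of the original main() is not shown above.
    analyzer = UrbanPlanningNotesAnalyzer(pdf_path)
    analyzer.extract_text_from_pdf()
    analyzer.extract_key_concepts()
    analyzer.identify_sections()
    analyzer.extract_case_studies()

    summary = analyzer.create_summary()
    print(f"Pages: {summary['total_pages']}, words: {summary['total_words']}")
    print("Key topics:", ', '.join(summary['key_topics'][:5]))

    for question in analyzer.generate_study_questions()[:3]:
        print("-", question['question'])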
    def extract_key_concepts(self) -> List[Dict]:
        """Extract and rank key urban planning concepts"""
        stop_words = set(stopwords.words('english'))

        # Urban planning specific terminology
        planning_terms = [
            'zoning', 'land use', 'transportation', 'infrastructure',
            'sustainability', 'urban design', 'smart growth', 'new urbanism',
            'gentrification', 'affordable housing', 'public space',
            'transit-oriented development', 'mixed-use', 'walkability',
            'green infrastructure', 'climate resilience', 'urban renewal',
            'community engagement', 'comprehensive plan', 'subdivision',
            'environmental impact', 'historic preservation', 'urban sprawl',
            'density', 'parking', 'complete streets', 'placemaking'
        ]

        # Tokenize and find frequencies
        words = word_tokenize(self.full_text.lower())
        words = [w for w in words if w.isalpha() and w not in stop_words]

        # Count frequencies of planning terms
        concept_counts = Counter()
        for term in planning_terms:
            count = self.full_text.lower().count(term)
            if count > 0:
                concept_counts[term] = count

        # Extract context for each concept
        concepts = []
        sentences = sent_tokenize(self.full_text)
        for concept, count in concept_counts.most_common(20):
            # Find sentences containing the concept
            context_sentences = [s for s in sentences if concept.lower() in s.lower()]
            context = context_sentences[:2] if context_sentences else []

            concepts.append({
                'term': concept,
                'frequency': count,
                'context': context
            })

        self.key_concepts = concepts
        return concepts
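extract_key_concepts() (and the sentence tokenization used elsewhere in the class) depends on NLTK corpus data that must be downloaded once per environment; without it, the tokenizer and stopword calls raise a LookupError. A one-time setup along these lines is needed (resource names can vary slightly between NLTK versions):

import nltk

nltk.download('punkt')      # word/sentence tokenizer models
nltk.download('stopwords')  # English stop word list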
    def create_summary(self) -> Dict:
        """Create a structured summary of the lecture notes"""
        summary = {
            'total_pages': len(self.pages_text),
            'total_words': len(self.full_text.split()),
            'key_topics': [c['term'] for c in self.key_concepts[:15]],
            'case_studies_count': len(self.case_studies),
            'main_sections': list(self.sections.keys())[:10],
            'core_principles': self._extract_principles(),
            'recommended_focus_areas': self._identify_focus_areas()
        }
        return summary
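create_summary() calls self._identify_focus_areas(), which is not defined anywhere in this excerpt. A minimal sketch of one plausible implementation, assuming it simply recommends the most frequently mentioned concepts (this logic is my assumption, not the original method):

    def _identify_focus_areas(self) -> List[str]:
        # Hypothetical implementation: flag the most frequently mentioned
        # concepts as recommended areas to review first.
        return [c['term'] for c in self.key_concepts[:5]]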
    def extract_text_from_pdf(self) -> str:
        """Extract text from PDF file"""
        text = ""
        with open(self.pdf_path, 'rb') as file:
            pdf_reader = PyPDF2.PdfReader(file)
            for page_num, page in enumerate(pdf_reader.pages):
                page_text = page.extract_text()
                self.pages_text.append({
                    'page_num': page_num + 1,
                    'text': page_text
                })
                text += page_text + "\n"

        self.full_text = text
        return text
    def generate_study_questions(self) -> List[Dict]:
        """Generate study questions based on key concepts and sections"""
        questions = []

        # Generate questions from key concepts
        for concept in self.key_concepts[:10]:
            questions.append({
                'type': 'concept',
                'question': f"What are the key principles and applications of {concept['term']} in urban planning?",
                'related_concept': concept['term'],
                'hint': f"Review the section discussing {concept['term']} (mentioned {concept['frequency']} times)"
            })

        # Generate questions from sections
        for section_name, section_text in list(self.sections.items())[:5]:
            if len(section_text) > 100:
                questions.append({
                    'type': 'section',
                    'question': f"Summarize the main arguments presented in '{section_name}' regarding urban planning approaches.",
                    'related_section': section_name,
                    'hint': "Focus on the key definitions and examples provided"
                })

        # Add comparative questions
        if len(self.case_studies) >= 2:
            questions.append({
                'type': 'comparative',
                'question': f"Compare and contrast the urban planning approaches in '{self.case_studies[0]['title']}' vs '{self.case_studies[1]['title']}'.",
                'hint': "Consider differences in context, implementation, and outcomes"
            })

        return questions
    def extract_case_studies(self) -> List[Dict]:
        """Identify and extract case studies from lecture notes"""
        case_patterns = [
            r'(?i)case study[:]\s*(.+?)(?:\n\n|\n\s*\n|$)',
            r'(?i)example[:]\s*(.+?)(?:\n\n|\n\s*\n|$)',
            r'(?i)([A-Z][a-z]+(?:[-\s][A-Z][a-z]+)*)\s+(?:is\s+an\s+example|demonstrates|illustrates)',
        ]

        case_studies = []
        sentences = sent_tokenize(self.full_text)

        for i, sentence in enumerate(sentences):
            for pattern in case_patterns:
                matches = re.findall(pattern, sentence)
                for match in matches:
                    # Get surrounding context
                    start_idx = max(0, i - 2)
                    end_idx = min(len(sentences), i + 3)
                    context = ' '.join(sentences[start_idx:end_idx])

                    case_studies.append({
                        'title': match if isinstance(match, str) else match[0],
                        'description': sentence,
                        'context': context
                    })

        self.case_studies = case_studies
        return case_studies