Skip to article frontmatterSkip to article content
Site not loading correctly?

This may be due to an incorrect BASE_URL configuration. See the MyST Documentation for reference.

Session 10: Capstone Project and Case Studies

Event Studies in Finance and Economics - Summer School


Session Overview

This final session brings together everything we’ve learned through:

  1. Three complete case studies demonstrating different event study applications

  2. A comprehensive reusable toolkit consolidating all methods

  3. Guided capstone project with real data

  4. Course summary and further reading


Setup: Complete Event Study Toolkit

We begin by loading a comprehensive toolkit that integrates all methods from previous sessions.

Source
# Third-party stack: yfinance supplies prices, statsmodels the OLS market model,
# matplotlib/seaborn the plots.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import yfinance as yf
import statsmodels.api as sm
from datetime import datetime, timedelta
from dataclasses import dataclass, field
from typing import List, Dict, Optional, Tuple, Union, Callable
from enum import Enum
import warnings
warnings.filterwarnings('ignore')  # suppress noisy yfinance/pandas warnings in notebook output

plt.style.use('seaborn-v0_8-whitegrid')  # 'seaborn-v0_8-*' style names require matplotlib >= 3.6
sns.set_palette("husl")

print("="*70)
print("EVENT STUDY TOOLKIT - SUMMER SCHOOL CAPSTONE")
print("="*70)
Source
# ============================================================================
# COMPLETE EVENT STUDY TOOLKIT
# ============================================================================

class ExpectedReturnModel(Enum):
    """Benchmark models for expected (normal) returns."""
    MARKET_MODEL = "market_model"        # OLS fit: R_i = alpha + beta * R_m
    MARKET_ADJUSTED = "market_adjusted"  # ER = market return (alpha=0, beta=1, no estimation)
    MEAN_ADJUSTED = "mean_adjusted"      # ER = mean return over the estimation window


@dataclass
class EventStudyConfig:
    """Configuration for event study."""
    estimation_window: int = 120   # trading days used to fit the benchmark model
    gap: int = 10                  # trading days skipped between estimation and event window
    event_window_pre: int = 5      # event-time days before the event (t < 0)
    event_window_post: int = 5     # event-time days after the event (t > 0)
    expected_return_model: ExpectedReturnModel = ExpectedReturnModel.MARKET_MODEL
    min_estimation_obs: int = 60   # minimum estimation observations to accept the fit
    market_index: str = "^GSPC"    # Yahoo Finance ticker for the market proxy (S&P 500)


@dataclass
class EventResult:
    """Outcome of processing a single event for one security.

    ``event_data`` holds the event-window frame with columns ``t`` (event
    time), ``AR`` (abnormal return), and others; ``sigma`` is the
    estimation-window residual standard deviation.
    """
    ticker: str
    event_date: pd.Timestamp
    event_data: pd.DataFrame
    alpha: float
    beta: float
    sigma: float
    r_squared: float
    n_est: int
    valid: bool = True
    error: str = ""
    metadata: Dict = field(default_factory=dict)

    def car(self, t1: int, t2: int) -> float:
        """Cumulative abnormal return over event days t1..t2 (inclusive)."""
        in_window = self.event_data['t'].between(t1, t2)
        return self.event_data.loc[in_window, 'AR'].sum()

    def scar(self, t1: int, t2: int) -> float:
        """Standardized CAR: CAR / sqrt(L * sigma^2); NaN when sigma is zero."""
        in_window = self.event_data['t'].between(t1, t2)
        window_length = int(in_window.sum())
        variance = window_length * self.sigma ** 2
        if variance <= 0:
            return np.nan
        return self.car(t1, t2) / np.sqrt(variance)


class EventStudyToolkit:
    """
    Complete event study toolkit consolidating all course methods.

    Workflow: process_event(s) downloads data, fits the configured
    expected-return model, and computes AR/CAR per event; the *_test
    methods aggregate CARs across events; comprehensive_analysis tabulates
    all tests; plot_* methods visualize the results.
    """
    
    def __init__(self, config: EventStudyConfig = None):
        # Fall back to default configuration when none is supplied.
        self.config = config or EventStudyConfig()
        self._cache = {}  # maps (ticker, start, end) -> daily return Series
    
    # ==================== DATA METHODS ====================
    
    def download_data(self, ticker: str, start: str, end: str) -> pd.Series:
        """Download and cache price data.

        Returns simple daily returns computed from adjusted close prices.
        NOTE(review): assumes a yfinance version whose download() returns an
        'Adj Close' column (auto_adjust=False behavior) - confirm installed
        version, newer releases auto-adjust by default.
        """
        key = (ticker, start, end)
        if key not in self._cache:
            data = yf.download(ticker, start=start, end=end, progress=False)['Adj Close']
            self._cache[key] = data.squeeze().pct_change().dropna()
        return self._cache[key]
    
    # ==================== ESTIMATION METHODS ====================
    
    def estimate_market_model(self, stock_ret: pd.Series, 
                               market_ret: pd.Series) -> Tuple[float, float, float, float]:
        """Estimate market model: R_i = alpha + beta * R_m + epsilon.

        Returns (alpha, beta, residual std, R-squared). ddof=2 adjusts the
        residual std for the two estimated parameters.
        """
        common = stock_ret.index.intersection(market_ret.index)
        y, x = stock_ret.loc[common], market_ret.loc[common]
        X = sm.add_constant(x)
        model = sm.OLS(y, X).fit()
        return model.params.iloc[0], model.params.iloc[1], np.std(model.resid, ddof=2), model.rsquared
    
    # ==================== CORE EVENT STUDY ====================
    
    def process_event(self, ticker: str, event_date: str, 
                       metadata: Dict = None) -> EventResult:
        """Process a single event.

        Never raises: any failure is returned as an EventResult with
        valid=False and the error message recorded.
        """
        try:
            event_dt = pd.to_datetime(event_date)
            cfg = self.config
            
            # Date range: the 1.5x / 2.5x calendar-day buffers cover weekends
            # and holidays so enough trading days remain after alignment.
            total_pre = cfg.estimation_window + cfg.gap + cfg.event_window_pre
            start = event_dt - timedelta(days=int(total_pre * 1.5))
            end = event_dt + timedelta(days=int(cfg.event_window_post * 2.5))
            
            # Download data
            stock = self.download_data(ticker, start.strftime('%Y-%m-%d'), end.strftime('%Y-%m-%d'))
            market = self.download_data(cfg.market_index, start.strftime('%Y-%m-%d'), end.strftime('%Y-%m-%d'))
            
            # Align stock and market returns on shared trading days.
            common = stock.index.intersection(market.index)
            df = pd.DataFrame({'stock': stock.loc[common], 'market': market.loc[common]})
            
            # Find event date; snap to nearest trading day when the given
            # date falls on a weekend or holiday.
            if event_dt not in df.index:
                idx = df.index.get_indexer([event_dt], method='nearest')[0]
                event_dt = df.index[idx]
            
            event_idx = df.index.get_loc(event_dt)
            df['t'] = range(-event_idx, len(df) - event_idx)  # event time: t = 0 on the event day
            
            # Split periods: estimation window ends `gap` days before the
            # event window opens (strict < keeps the gap clean).
            est_end = -(cfg.gap + cfg.event_window_pre)
            est_mask = (df['t'] >= est_end - cfg.estimation_window) & (df['t'] < est_end)
            evt_mask = (df['t'] >= -cfg.event_window_pre) & (df['t'] <= cfg.event_window_post)
            
            est_data = df[est_mask]
            evt_data = df[evt_mask].copy()
            
            if len(est_data) < cfg.min_estimation_obs:
                raise ValueError(f"Insufficient estimation data: {len(est_data)}")
            
            # Estimate model and fill expected returns (ER) for the event window.
            if cfg.expected_return_model == ExpectedReturnModel.MARKET_MODEL:
                alpha, beta, sigma, r2 = self.estimate_market_model(est_data['stock'], est_data['market'])
                evt_data['ER'] = alpha + beta * evt_data['market']
            elif cfg.expected_return_model == ExpectedReturnModel.MARKET_ADJUSTED:
                alpha, beta, sigma, r2 = 0, 1, est_data['stock'].std(), 0
                evt_data['ER'] = evt_data['market']
            else:  # MEAN_ADJUSTED
                alpha = est_data['stock'].mean()
                beta, sigma, r2 = 0, est_data['stock'].std(), 0
                evt_data['ER'] = alpha
            
            evt_data['AR'] = evt_data['stock'] - evt_data['ER']
            evt_data['CAR'] = evt_data['AR'].cumsum()
            
            return EventResult(
                ticker=ticker, event_date=event_dt, event_data=evt_data,
                alpha=alpha, beta=beta, sigma=sigma, r_squared=r2,
                n_est=len(est_data), metadata=metadata or {}
            )
            
        except Exception as e:
            # Deliberate catch-all: a failed download/alignment/estimation
            # yields an invalid placeholder result instead of propagating.
            return EventResult(
                ticker=ticker, event_date=pd.to_datetime(event_date),
                event_data=None, alpha=0, beta=0, sigma=0, r_squared=0,
                n_est=0, valid=False, error=str(e)
            )
    
    def process_events(self, events: List[Dict], verbose: bool = True) -> List[EventResult]:
        """Process multiple events.

        Each event dict needs 'ticker' and 'date'; 'metadata' is optional.
        Failed events are kept in the output with valid=False.
        """
        results = []
        for i, e in enumerate(events):
            if verbose:
                print(f"  [{i+1}/{len(events)}] {e['ticker']}...", end=" ")
            result = self.process_event(e['ticker'], e['date'], e.get('metadata', {}))
            results.append(result)
            if verbose:
                print("OK" if result.valid else f"FAILED: {result.error}")
        return results
    
    # ==================== STATISTICAL TESTS ====================
    
    def cross_sectional_test(self, results: List[EventResult], t1: int, t2: int) -> Dict:
        """Cross-sectional t-test on CARs.

        H0: mean CAR = 0; two-sided p-value from the t distribution.
        """
        cars = np.array([r.car(t1, t2) for r in results if r.valid])
        n = len(cars)
        mean = np.mean(cars)
        std = np.std(cars, ddof=1)
        t_stat = mean / (std / np.sqrt(n)) if std > 0 else 0
        p_val = 2 * (1 - stats.t.cdf(abs(t_stat), df=n-1))
        return {'caar': mean, 'std': std, 't': t_stat, 'p': p_val, 'n': n, 'cars': cars}
    
    def patell_test(self, results: List[EventResult], t1: int, t2: int) -> Dict:
        """Patell (1976) standardized residual test.

        Simplified teaching version: sums SCARs and scales by sqrt(n),
        without the degrees-of-freedom correction factor of the full Patell
        statistic.
        """
        scars = np.array([r.scar(t1, t2) for r in results if r.valid])
        scars = scars[~np.isnan(scars)]
        n = len(scars)
        z = np.sum(scars) / np.sqrt(n) if n > 0 else 0
        p = 2 * (1 - stats.norm.cdf(abs(z)))
        return {'z': z, 'p': p, 'n': n}
    
    def bmp_test(self, results: List[EventResult], t1: int, t2: int) -> Dict:
        """Boehmer-Musumeci-Poulsen (1991) test.

        Implemented as a cross-sectional t-test on SCARs, which robustifies
        against event-induced variance relative to the Patell statistic.
        """
        scars = np.array([r.scar(t1, t2) for r in results if r.valid])
        scars = scars[~np.isnan(scars)]
        n = len(scars)
        mean_s = np.mean(scars)
        std_s = np.std(scars, ddof=1)
        t_stat = mean_s / (std_s / np.sqrt(n)) if std_s > 0 else 0
        p = 2 * (1 - stats.t.cdf(abs(t_stat), df=n-1))
        return {'t': t_stat, 'p': p, 'n': n}
    
    def sign_test(self, results: List[EventResult], t1: int, t2: int) -> Dict:
        """Non-parametric sign test.

        Normal approximation to the binomial under H0: P(CAR > 0) = 0.5.
        """
        cars = np.array([r.car(t1, t2) for r in results if r.valid])
        n_pos = np.sum(cars > 0)
        n = len(cars)
        z = (n_pos - 0.5*n) / np.sqrt(0.25*n)
        p = 2 * (1 - stats.norm.cdf(abs(z)))
        return {'n_pos': n_pos, 'pct_pos': n_pos/n*100, 'z': z, 'p': p, 'n': n}
    
    def rank_test(self, results: List[EventResult], t1: int, t2: int) -> Dict:
        """Corrado (1989) rank test.

        Simplified: ranks are computed within the event window only,
        whereas the original Corrado statistic ranks over the combined
        estimation + event period.
        """
        valid = [r for r in results if r.valid]
        K_avg = []
        for r in valid:
            mask = (r.event_data['t'] >= t1) & (r.event_data['t'] <= t2)
            ar_event = r.event_data.loc[mask, 'AR'].values
            # Simple approximation using event window ranks
            ranks = stats.rankdata(ar_event)
            T = len(ar_event)
            K = ranks / (T + 1) - 0.5  # demeaned scaled ranks in (-0.5, 0.5)
            K_avg.append(np.mean(K))
        
        K_avg = np.array(K_avg)
        n = len(K_avg)
        mean_K = np.mean(K_avg)
        std_K = np.std(K_avg, ddof=1)
        z = mean_K / (std_K / np.sqrt(n)) if std_K > 0 else 0
        p = 2 * (1 - stats.norm.cdf(abs(z)))
        return {'z': z, 'p': p, 'n': n}
    
    def bootstrap_test(self, results: List[EventResult], t1: int, t2: int, 
                        n_boot: int = 5000) -> Dict:
        """Bootstrap confidence interval.

        Resamples CARs with replacement; fixed seed makes the interval
        reproducible across notebook runs. The p-value is the fraction of
        bootstrap means at least |observed| away from the observed mean.
        """
        cars = np.array([r.car(t1, t2) for r in results if r.valid])
        n = len(cars)
        observed = np.mean(cars)
        
        np.random.seed(42)
        boot = np.array([np.mean(np.random.choice(cars, n, replace=True)) for _ in range(n_boot)])
        
        ci = np.percentile(boot, [2.5, 97.5])
        p = np.mean(np.abs(boot - observed) >= np.abs(observed))
        
        return {'caar': observed, 'ci_low': ci[0], 'ci_high': ci[1], 'se': np.std(boot), 'p': p, 'n': n}
    
    # ==================== ANALYSIS ====================
    
    def comprehensive_analysis(self, results: List[EventResult],
                                windows: List[Tuple[int, int]] = None) -> pd.DataFrame:
        """Run all tests for multiple windows.

        Returns one row per window with every test statistic; p-value column
        is from the cross-sectional t-test.
        """
        if windows is None:
            windows = [(-1, 1), (0, 0), (-5, 5), (0, 5)]
        
        rows = []
        for t1, t2 in windows:
            cs = self.cross_sectional_test(results, t1, t2)
            pt = self.patell_test(results, t1, t2)
            bmp = self.bmp_test(results, t1, t2)
            sign = self.sign_test(results, t1, t2)
            rank = self.rank_test(results, t1, t2)
            
            rows.append({
                'Window': f"({t1},{t2})",
                'N': cs['n'],
                'CAAR': cs['caar'],
                'Median': np.median(cs['cars']),
                't-stat': cs['t'],
                'Patell-z': pt['z'],
                'BMP-t': bmp['t'],
                'Sign-z': sign['z'],
                'Rank-z': rank['z'],
                '%Pos': sign['pct_pos'],
                'p-value': cs['p']
            })
        
        return pd.DataFrame(rows)
    
    def get_time_series(self, results: List[EventResult]) -> pd.DataFrame:
        """Calculate AAR and CAAR time series.

        Averages AR across all valid events at each event time t; events
        missing a given t are simply excluded from that day's average.
        """
        valid = [r for r in results if r.valid]
        all_t = sorted(set(t for r in valid for t in r.event_data['t'].values))
        
        rows = []
        for t in all_t:
            ars = [r.event_data[r.event_data['t'] == t]['AR'].values[0] 
                   for r in valid if t in r.event_data['t'].values]
            if ars:
                rows.append({'t': t, 'AAR': np.mean(ars), 'AAR_se': np.std(ars, ddof=1)/np.sqrt(len(ars)), 'n': len(ars)})
        
        df = pd.DataFrame(rows)
        df['CAAR'] = df['AAR'].cumsum()
        return df
    
    # ==================== VISUALIZATION ====================
    
    def plot_caar(self, results: List[EventResult], ax=None, title: str = None):
        """Plot CAAR with confidence bands.

        NOTE(review): the band cumulates per-day standard errors, which
        ignores cross-day covariance of AARs - treat it as indicative only.
        """
        ts = self.get_time_series(results)
        
        if ax is None:
            fig, ax = plt.subplots(figsize=(10, 6))
        
        ax.plot(ts['t'], ts['CAAR']*100, 'b-', linewidth=2, marker='o', markersize=4)
        cum_se = ts['AAR_se'].cumsum()
        ax.fill_between(ts['t'], (ts['CAAR']-1.96*cum_se)*100, (ts['CAAR']+1.96*cum_se)*100, alpha=0.2)
        ax.axhline(0, color='gray', linewidth=0.5)
        ax.axvline(0, color='red', linestyle='--', linewidth=2)  # mark the event day
        ax.set_xlabel('Event Time')
        ax.set_ylabel('CAAR (%)')
        ax.set_title(title or 'Cumulative Average Abnormal Return')
        return ax
    
    def plot_car_distribution(self, results: List[EventResult], t1: int, t2: int, ax=None):
        """Plot CAR distribution.

        Histogram of per-event CARs (in %) with the sample mean marked.
        """
        cars = [r.car(t1, t2)*100 for r in results if r.valid]
        
        if ax is None:
            fig, ax = plt.subplots(figsize=(8, 5))
        
        ax.hist(cars, bins=15, edgecolor='black', alpha=0.7)
        ax.axvline(np.mean(cars), color='red', linestyle='--', linewidth=2, label=f'Mean={np.mean(cars):.2f}%')
        ax.axvline(0, color='black', linewidth=1)
        ax.set_xlabel(f'CAR({t1},{t2}) %')
        ax.set_ylabel('Frequency')
        ax.legend()
        return ax


# Confirm the toolkit definition cell executed.
print("EventStudyToolkit loaded successfully!")

Case Study 1: Earnings Announcements

Research Question

Do earnings announcements with positive surprises generate positive abnormal returns?

This is a classic event study application testing semi-strong form market efficiency.

Source
# Case Study 1: Earnings Announcements
print("\n" + "="*80)
print("CASE STUDY 1: EARNINGS ANNOUNCEMENT EFFECTS")
print("="*80)

# Sample: Q2-Q3 2023 earnings with surprise data
# NOTE(review): surprise figures and dates are course-provided illustrative
# data - verify against an earnings-surprise source before drawing conclusions.
earnings_events = [
    # Positive surprises
    {'ticker': 'NVDA', 'date': '2023-08-23', 'metadata': {'surprise': 0.35, 'type': 'positive'}},
    {'ticker': 'META', 'date': '2023-07-26', 'metadata': {'surprise': 0.22, 'type': 'positive'}},
    {'ticker': 'AMZN', 'date': '2023-08-03', 'metadata': {'surprise': 0.45, 'type': 'positive'}},
    {'ticker': 'GOOGL', 'date': '2023-07-25', 'metadata': {'surprise': 0.12, 'type': 'positive'}},
    {'ticker': 'MSFT', 'date': '2023-07-25', 'metadata': {'surprise': 0.08, 'type': 'positive'}},
    {'ticker': 'AAPL', 'date': '2023-08-03', 'metadata': {'surprise': 0.05, 'type': 'positive'}},
    {'ticker': 'JPM', 'date': '2023-07-14', 'metadata': {'surprise': 0.10, 'type': 'positive'}},
    {'ticker': 'WMT', 'date': '2023-08-17', 'metadata': {'surprise': 0.09, 'type': 'positive'}},
    # Negative/mixed surprises
    {'ticker': 'TSLA', 'date': '2023-07-19', 'metadata': {'surprise': -0.05, 'type': 'negative'}},
    {'ticker': 'GS', 'date': '2023-07-19', 'metadata': {'surprise': -0.15, 'type': 'negative'}},
    {'ticker': 'AMD', 'date': '2023-08-01', 'metadata': {'surprise': -0.02, 'type': 'negative'}},
    {'ticker': 'INTC', 'date': '2023-07-27', 'metadata': {'surprise': 0.18, 'type': 'positive'}},
]

# Configure and run: 120-day estimation window, 10-day gap, +/-5 day event window.
config = EventStudyConfig(
    estimation_window=120,
    gap=10,
    event_window_pre=5,
    event_window_post=5
)

toolkit = EventStudyToolkit(config)
print("\nProcessing earnings events...")
earnings_results = toolkit.process_events(earnings_events)
Source
# Full sample analysis
print("\n" + "-"*80)
print("FULL SAMPLE RESULTS")
print("-"*80)

# All test statistics over the default event windows (-1,1), (0,0), (-5,5), (0,5).
analysis = toolkit.comprehensive_analysis(earnings_results)

def sig(val):
    """Return significance stars for a test statistic.

    Thresholds are two-sided normal critical values: |z| > 2.58 (1%),
    |z| > 1.96 (5%), |z| > 1.65 (10%); empty string otherwise.
    """
    magnitude = abs(val)
    for threshold, stars in ((2.58, '***'), (1.96, '**'), (1.65, '*')):
        if magnitude > threshold:
            return stars
    return ''

# Formatted results table; stars mark 10%/5%/1% two-sided significance.
print(f"\n{'Window':<10} {'N':>4} {'CAAR':>10} {'Median':>10} {'t-stat':>10} {'Patell':>10} {'%Pos':>8}")
print("-"*70)
for _, row in analysis.iterrows():
    print(f"{row['Window']:<10} {int(row['N']):>4} {row['CAAR']*100:>+9.2f}% {row['Median']*100:>+9.2f}% "
          f"{row['t-stat']:>8.2f}{sig(row['t-stat']):<2} {row['Patell-z']:>8.2f}{sig(row['Patell-z']):<2} "
          f"{row['%Pos']:>7.0f}%")
Source
# Subsample analysis: Positive vs Negative surprises
print("\n" + "-"*80)
print("SUBSAMPLE ANALYSIS: POSITIVE vs NEGATIVE SURPRISES")
print("-"*80)

# Partition valid results by the 'type' tag attached in the event metadata.
pos_results = [r for r in earnings_results if r.valid and r.metadata.get('type') == 'positive']
neg_results = [r for r in earnings_results if r.valid and r.metadata.get('type') == 'negative']

print(f"\nPositive Surprises (N={len(pos_results)}):")
if pos_results:
    pos_test = toolkit.cross_sectional_test(pos_results, -1, 1)
    print(f"  CAAR(-1,+1) = {pos_test['caar']*100:+.2f}% (t = {pos_test['t']:.2f})")

print(f"\nNegative Surprises (N={len(neg_results)}):")
if neg_results:
    neg_test = toolkit.cross_sectional_test(neg_results, -1, 1)
    print(f"  CAAR(-1,+1) = {neg_test['caar']*100:+.2f}% (t = {neg_test['t']:.2f})")
Source
# Visualization: 2x2 grid - CAAR path, CAR histogram, CAR-vs-surprise
# scatter (earnings response coefficient), and per-firm CAR bars.
fig, axes = plt.subplots(2, 2, figsize=(14, 10))

# CAAR plot
toolkit.plot_caar(earnings_results, ax=axes[0, 0], title='CAAR: Earnings Announcements')

# CAR distribution
toolkit.plot_car_distribution(earnings_results, -1, 1, ax=axes[0, 1])
axes[0, 1].set_title('CAR(-1,+1) Distribution')

# CAR vs Surprise scatter
ax3 = axes[1, 0]
surprises = [r.metadata.get('surprise', 0) for r in earnings_results if r.valid]
cars = [r.car(-1, 1)*100 for r in earnings_results if r.valid]
colors = ['green' if s > 0 else 'red' for s in surprises]
ax3.scatter(surprises, cars, c=colors, s=100, alpha=0.7)

# Regression line (OLS fit via numpy polyfit, degree 1)
z = np.polyfit(surprises, cars, 1)
p = np.poly1d(z)
x_line = np.linspace(min(surprises), max(surprises), 100)
ax3.plot(x_line, p(x_line), 'b--', linewidth=2)

ax3.axhline(0, color='gray', linewidth=0.5)
ax3.axvline(0, color='gray', linewidth=0.5)
ax3.set_xlabel('Earnings Surprise')
ax3.set_ylabel('CAR(-1,+1) %')
ax3.set_title('Earnings Response Coefficient')

# Individual firm bars
ax4 = axes[1, 1]
valid = [r for r in earnings_results if r.valid]
tickers = [r.ticker for r in valid]
firm_cars = [r.car(-1, 1)*100 for r in valid]
colors = ['green' if c > 0 else 'red' for c in firm_cars]
ax4.barh(range(len(tickers)), firm_cars, color=colors, alpha=0.7)
ax4.set_yticks(range(len(tickers)))
ax4.set_yticklabels(tickers)
ax4.axvline(0, color='black', linewidth=0.5)
ax4.set_xlabel('CAR(-1,+1) %')
ax4.set_title('Individual Firm CARs')

plt.tight_layout()
plt.show()

# Cross-sectional regression of CAR on the surprise measure.
# NOTE(review): with ~12 observations the t-statistics are fragile.
print("\n" + "-"*80)
print("CROSS-SECTIONAL REGRESSION: CAR on Surprise")
print("-"*80)
y = np.array(cars)
X = sm.add_constant(np.array(surprises))
model = sm.OLS(y, X).fit()
print(f"\nCAR(-1,+1) = {model.params[0]:.4f} + {model.params[1]:.4f} × Surprise")
print(f"            ({model.tvalues[0]:.2f})   ({model.tvalues[1]:.2f})")
print(f"R² = {model.rsquared:.3f}")

Case Study 2: M&A Announcements

Research Question

How do target and acquirer stock prices react to M&A announcements?

Classic finding: Targets gain, acquirers break even or lose.

Source
# Case Study 2: M&A Announcements
print("\n" + "="*80)
print("CASE STUDY 2: M&A ANNOUNCEMENT EFFECTS")
print("="*80)

# Sample: Recent tech M&A (using acquirer stocks where target was acquired)
# Using acquirers since targets often delist
# NOTE(review): some announcement dates fall on non-trading days (e.g. NVDA/ARM
# 2020-09-13 is a Sunday); process_event snaps those to the nearest trading day.
ma_events = [
    # Major tech acquisitions (acquirer perspective); 'value' is deal size in $B
    {'ticker': 'MSFT', 'date': '2022-01-18', 'metadata': {'role': 'acquirer', 'target': 'Activision', 'value': 69}},
    {'ticker': 'AVGO', 'date': '2023-05-26', 'metadata': {'role': 'acquirer', 'target': 'VMware', 'value': 61}},
    {'ticker': 'AMZN', 'date': '2022-03-17', 'metadata': {'role': 'acquirer', 'target': 'MGM', 'value': 8.5}},
    {'ticker': 'META', 'date': '2021-10-28', 'metadata': {'role': 'acquirer', 'target': 'Within', 'value': 0.4}},
    {'ticker': 'GOOGL', 'date': '2022-03-15', 'metadata': {'role': 'acquirer', 'target': 'Mandiant', 'value': 5.4}},
    {'ticker': 'CRM', 'date': '2020-12-01', 'metadata': {'role': 'acquirer', 'target': 'Slack', 'value': 27.7}},
    {'ticker': 'AMD', 'date': '2020-10-27', 'metadata': {'role': 'acquirer', 'target': 'Xilinx', 'value': 35}},
    {'ticker': 'NVDA', 'date': '2020-09-13', 'metadata': {'role': 'acquirer', 'target': 'ARM', 'value': 40}},
]

# Process M&A events with a wider +/-10 day event window.
ma_config = EventStudyConfig(
    estimation_window=120,
    gap=10,
    event_window_pre=10,
    event_window_post=10
)

ma_toolkit = EventStudyToolkit(ma_config)
print("\nProcessing M&A events...")
ma_results = ma_toolkit.process_events(ma_events)
Source
# M&A Results
print("\n" + "-"*80)
print("M&A ANNOUNCEMENT RESULTS (ACQUIRER RETURNS)")
print("-"*80)

# Wider windows than the earnings study, matching the +/-10 day event window.
ma_analysis = ma_toolkit.comprehensive_analysis(ma_results, windows=[(-1, 1), (-3, 3), (-5, 5), (-10, 10)])

print(f"\n{'Window':<12} {'N':>4} {'CAAR':>10} {'t-stat':>10} {'%Pos':>8}")
print("-"*50)
for _, row in ma_analysis.iterrows():
    print(f"{row['Window']:<12} {int(row['N']):>4} {row['CAAR']*100:>+9.2f}% "
          f"{row['t-stat']:>8.2f}{sig(row['t-stat']):<2} {row['%Pos']:>7.0f}%")

print("\nNote: Acquirers typically show zero or negative CARs at announcement.")
Source
# M&A Visualization: CAAR path plus deal-value vs CAR scatter.
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

# CAAR plot
ma_toolkit.plot_caar(ma_results, ax=axes[0], title='Acquirer CAAR: M&A Announcements')

# Deal value vs CAR, one labelled point per acquirer
ax2 = axes[1]
valid_ma = [r for r in ma_results if r.valid]
values = [r.metadata.get('value', 0) for r in valid_ma]
ma_cars = [r.car(-1, 1)*100 for r in valid_ma]
tickers = [r.ticker for r in valid_ma]

ax2.scatter(values, ma_cars, s=150, alpha=0.7)
for i, ticker in enumerate(tickers):
    ax2.annotate(ticker, (values[i], ma_cars[i]), xytext=(5, 5), textcoords='offset points')

ax2.axhline(0, color='gray', linewidth=0.5)
ax2.set_xlabel('Deal Value ($B)')
ax2.set_ylabel('CAR(-1,+1) %')
ax2.set_title('Acquirer CAR vs Deal Value')

plt.tight_layout()
plt.show()

Case Study 3: Regulatory/Policy Events

Research Question

How do industry stocks react to major regulatory announcements?

Example: Fed interest rate decisions affect financial stocks.

Source
# Case Study 3: Fed Rate Decisions - Bank Stock Reactions
print("\n" + "="*80)
print("CASE STUDY 3: FED RATE DECISION EFFECTS ON BANK STOCKS")
print("="*80)

# 2023 Fed meetings with rate decisions
fed_dates = ['2023-02-01', '2023-03-22', '2023-05-03', '2023-06-14', 
             '2023-07-26', '2023-09-20', '2023-11-01', '2023-12-13']

# Bank stocks to analyze
banks = ['JPM', 'BAC', 'WFC', 'C', 'GS', 'MS']

# Create events: Each bank for each Fed date.
# NOTE(review): all banks share the same event dates, so observations are
# cross-sectionally clustered - the independent-events tests will overstate
# significance here.
fed_events = []
for date in fed_dates:
    for bank in banks:
        fed_events.append({'ticker': bank, 'date': date, 'metadata': {'fed_date': date}})

# Configure for short window (Fed announcements are precise)
fed_config = EventStudyConfig(
    estimation_window=100,
    gap=5,
    event_window_pre=2,
    event_window_post=2
)

fed_toolkit = EventStudyToolkit(fed_config)
print(f"\nProcessing {len(fed_events)} bank-Fed date combinations...")
fed_results = fed_toolkit.process_events(fed_events, verbose=False)
valid_fed = [r for r in fed_results if r.valid]
print(f"Valid observations: {len(valid_fed)}")
Source
# Fed Results: short windows matching the +/-2 day event window.
print("\n" + "-"*80)
print("BANK STOCK RESPONSE TO FED ANNOUNCEMENTS")
print("-"*80)

fed_analysis = fed_toolkit.comprehensive_analysis(fed_results, windows=[(-1, 1), (0, 0), (0, 1)])

print(f"\n{'Window':<12} {'N':>4} {'CAAR':>10} {'t-stat':>10} {'Patell':>10} {'%Pos':>8}")
print("-"*60)
for _, row in fed_analysis.iterrows():
    print(f"{row['Window']:<12} {int(row['N']):>4} {row['CAAR']*100:>+9.3f}% "
          f"{row['t-stat']:>8.2f}{sig(row['t-stat']):<2} {row['Patell-z']:>8.2f}{sig(row['Patell-z']):<2} "
          f"{row['%Pos']:>7.0f}%")
Source
# Analysis by bank: per-ticker cross-sectional test across the 8 Fed dates.
print("\n" + "-"*80)
print("RESULTS BY BANK")
print("-"*80)

bank_results = {}
for bank in banks:
    bank_events = [r for r in fed_results if r.valid and r.ticker == bank]
    if bank_events:
        test = fed_toolkit.cross_sectional_test(bank_events, -1, 1)
        bank_results[bank] = test

print(f"\n{'Bank':<8} {'N':>4} {'CAAR(-1,+1)':>14} {'t-stat':>10}")
print("-"*40)
for bank, test in bank_results.items():
    print(f"{bank:<8} {test['n']:>4} {test['caar']*100:>+13.3f}% {test['t']:>8.2f}{sig(test['t'])}")
Source
# Fed study visualization: pooled CAAR path plus per-bank average CAR bars.
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

# CAAR
fed_toolkit.plot_caar(fed_results, ax=axes[0], title='Bank Stock CAAR: Fed Announcements')

# By bank bar chart (only banks with at least one valid observation)
ax2 = axes[1]
bank_caars = [bank_results[b]['caar']*100 for b in banks if b in bank_results]
bank_labels = [b for b in banks if b in bank_results]
colors = ['green' if c > 0 else 'red' for c in bank_caars]

ax2.bar(bank_labels, bank_caars, color=colors, alpha=0.7, edgecolor='black')
ax2.axhline(0, color='black', linewidth=0.5)
ax2.set_ylabel('Average CAR(-1,+1) %')
ax2.set_title('Average Response by Bank')

plt.tight_layout()
plt.show()

Capstone Project: Your Own Event Study

Instructions

Use the toolkit below to conduct your own event study. The template provides:

  1. Event definition - Define your events

  2. Configuration - Set your parameters

  3. Processing - Run the event study

  4. Analysis - Statistical tests

  5. Visualization - Graphs and tables

  6. Interpretation - Draw conclusions

Source
# ============================================================================
# CAPSTONE PROJECT TEMPLATE
# ============================================================================

print("\n" + "="*80)
print("CAPSTONE PROJECT: DIVIDEND ANNOUNCEMENT EFFECTS")
print("="*80)

# STEP 1: Define your research question
print("""
RESEARCH QUESTION:
Do dividend increase announcements generate positive abnormal returns?

HYPOTHESIS:
H1: CAAR(-1,+1) > 0 for dividend increase announcements
""")

# STEP 2: Define your events
# Example: Dividend increase announcements in 2023
# NOTE(review): dates and increase percentages are course-provided examples -
# verify against company announcements before relying on them.
dividend_events = [
    {'ticker': 'AAPL', 'date': '2023-05-04', 'metadata': {'increase_pct': 4.3}},
    {'ticker': 'MSFT', 'date': '2023-09-19', 'metadata': {'increase_pct': 10.0}},
    {'ticker': 'JPM', 'date': '2023-06-28', 'metadata': {'increase_pct': 5.0}},
    {'ticker': 'JNJ', 'date': '2023-04-18', 'metadata': {'increase_pct': 5.3}},
    {'ticker': 'PG', 'date': '2023-04-11', 'metadata': {'increase_pct': 3.0}},
    {'ticker': 'KO', 'date': '2023-02-16', 'metadata': {'increase_pct': 4.5}},
    {'ticker': 'PEP', 'date': '2023-05-02', 'metadata': {'increase_pct': 10.0}},
    {'ticker': 'HD', 'date': '2023-02-23', 'metadata': {'increase_pct': 10.0}},
    {'ticker': 'MCD', 'date': '2023-10-27', 'metadata': {'increase_pct': 10.0}},
    {'ticker': 'V', 'date': '2023-10-24', 'metadata': {'increase_pct': 15.6}},
]

print(f"Sample: {len(dividend_events)} dividend increase announcements")
Source
# STEP 3: Configure the study (market-model benchmark, standard windows)
div_config = EventStudyConfig(
    estimation_window=120,      # 120 trading days
    gap=10,                      # 10 day gap
    event_window_pre=5,          # 5 days before
    event_window_post=5,         # 5 days after
    expected_return_model=ExpectedReturnModel.MARKET_MODEL,
    min_estimation_obs=60
)

# STEP 4: Process events
div_toolkit = EventStudyToolkit(div_config)
print("\nProcessing dividend events...")
div_results = div_toolkit.process_events(dividend_events)
Source
# STEP 5: Statistical Analysis - full test battery plus a bootstrap CI
# for the headline (-1,+1) window.
print("\n" + "-"*80)
print("STATISTICAL RESULTS")
print("-"*80)

div_analysis = div_toolkit.comprehensive_analysis(div_results)

print(f"\n{'Window':<10} {'N':>4} {'CAAR':>10} {'Median':>10} {'t-stat':>10} {'BMP':>10} {'%Pos':>8}")
print("-"*70)
for _, row in div_analysis.iterrows():
    print(f"{row['Window']:<10} {int(row['N']):>4} {row['CAAR']*100:>+9.3f}% {row['Median']*100:>+9.3f}% "
          f"{row['t-stat']:>8.2f}{sig(row['t-stat']):<2} {row['BMP-t']:>8.2f}{sig(row['BMP-t']):<2} "
          f"{row['%Pos']:>7.0f}%")

# Bootstrap for main window
boot = div_toolkit.bootstrap_test(div_results, -1, 1)
print(f"\nBootstrap 95% CI for CAAR(-1,+1): [{boot['ci_low']*100:.3f}%, {boot['ci_high']*100:.3f}%]")
Source
# STEP 6: Visualization - same 2x2 layout as case study 1.
fig, axes = plt.subplots(2, 2, figsize=(14, 10))

# CAAR plot
div_toolkit.plot_caar(div_results, ax=axes[0, 0], title='CAAR: Dividend Increases')

# Distribution
div_toolkit.plot_car_distribution(div_results, -1, 1, ax=axes[0, 1])
axes[0, 1].set_title('CAR(-1,+1) Distribution')

# CAR vs Dividend Increase %
ax3 = axes[1, 0]
valid_div = [r for r in div_results if r.valid]
increases = [r.metadata.get('increase_pct', 0) for r in valid_div]
div_cars = [r.car(-1, 1)*100 for r in valid_div]
tickers = [r.ticker for r in valid_div]

ax3.scatter(increases, div_cars, s=100, alpha=0.7)
for i, ticker in enumerate(tickers):
    ax3.annotate(ticker, (increases[i], div_cars[i]), xytext=(5, 5), textcoords='offset points', fontsize=8)

# Regression line (OLS fit via numpy polyfit, degree 1)
z = np.polyfit(increases, div_cars, 1)
p = np.poly1d(z)
x_line = np.linspace(min(increases), max(increases), 100)
ax3.plot(x_line, p(x_line), 'r--', linewidth=2)

ax3.axhline(0, color='gray', linewidth=0.5)
ax3.set_xlabel('Dividend Increase (%)')
ax3.set_ylabel('CAR(-1,+1) %')
ax3.set_title('CAR vs Dividend Increase Magnitude')

# Individual firms
ax4 = axes[1, 1]
colors = ['green' if c > 0 else 'red' for c in div_cars]
ax4.barh(range(len(tickers)), div_cars, color=colors, alpha=0.7)
ax4.set_yticks(range(len(tickers)))
ax4.set_yticklabels(tickers)
ax4.axvline(0, color='black', linewidth=0.5)
ax4.set_xlabel('CAR(-1,+1) %')
ax4.set_title('Individual Firm CARs')

plt.tight_layout()
plt.show()
Source
# STEP 7: Cross-sectional regression of CAR on the dividend-increase size.
# NOTE(review): only ~10 observations - treat the t-statistics as indicative.
print("\n" + "-"*80)
print("CROSS-SECTIONAL ANALYSIS")
print("-"*80)

y = np.array(div_cars)
X = sm.add_constant(np.array(increases))
cs_model = sm.OLS(y, X).fit()

print(f"\nRegression: CAR = α + β × Dividend_Increase")
print(f"\n  Intercept (α): {cs_model.params[0]:.4f} (t = {cs_model.tvalues[0]:.2f})")
print(f"  Slope (β):     {cs_model.params[1]:.4f} (t = {cs_model.tvalues[1]:.2f})")
print(f"  R-squared:     {cs_model.rsquared:.3f}")
Source
# STEP 8: Write your conclusions - the template text below is generated from
# the computed statistics so it stays consistent with the actual results.
print("\n" + "="*80)
print("CONCLUSIONS")
print("="*80)

caar_11 = div_toolkit.cross_sectional_test(div_results, -1, 1)
sign = div_toolkit.sign_test(div_results, -1, 1)

conclusion = f"""
FINDINGS:

1. Average Effect:
   - CAAR(-1,+1) = {caar_11['caar']*100:+.3f}%
   - t-statistic = {caar_11['t']:.2f}
   - The average announcement effect is {'statistically significant' if abs(caar_11['t']) > 1.96 else 'not statistically significant'} at the 5% level.

2. Sign Test:
   - {sign['pct_pos']:.0f}% of events have positive CARs
   - This {'supports' if sign['pct_pos'] > 50 else 'does not support'} the hypothesis that dividend increases are viewed positively.

3. Cross-Sectional Analysis:
   - The relationship between dividend increase magnitude and CAR is {'positive' if cs_model.params[1] > 0 else 'negative'}.
   - A 1 percentage point larger dividend increase is associated with a {cs_model.params[1]:.3f}% {'higher' if cs_model.params[1] > 0 else 'lower'} CAR.

INTERPRETATION:

The results {'support' if caar_11['t'] > 1.65 and caar_11['caar'] > 0 else 'do not strongly support'} the dividend signaling hypothesis.
{'Investors appear to interpret dividend increases as positive signals about future cash flows.' if caar_11['t'] > 1.65 and caar_11['caar'] > 0 else 'The market reaction is not conclusively positive, suggesting either efficient pricing or other factors at play.'}
"""

print(conclusion)

Course Summary

What We Covered

| Session | Topic | Key Concepts |
|---------|-------|--------------|
| 1–2 | Foundations | Market model, estimation windows, event timing |
| 3 | Abnormal Returns | AR, CAR, BHAR, aggregation methods |
| 4 | Parametric Tests | Cross-sectional t, Patell, BMP, Kolari-Pynnönen |
| 5 | Non-parametric Tests | Sign, rank, bootstrap, power analysis |
| 6 | Cross-Sectional Analysis | WLS, heteroskedasticity, interactions |
| 7 | Long-Horizon Studies | BHAR, calendar-time, clustering |
| 8 | Extensions | Intraday, bonds, international, DiD |
| 9 | Implementation | Framework design, robustness, pitfalls |
| 10 | Capstone | Complete applications, case studies |
Source
# Final summary and checklist
# Display-only cell: prints a fixed Unicode box-drawing banner summarising
# the event-study workflow, best practices, and common applications.
# The literal is runtime output, so its text is left exactly as authored.
print("""
╔══════════════════════════════════════════════════════════════════════════════╗
║                     EVENT STUDY METHODOLOGY SUMMARY                         ║
╠══════════════════════════════════════════════════════════════════════════════╣
║                                                                              ║
║  KEY STEPS:                                                                  ║
║    1. Define event and identify event dates                                  ║
║    2. Collect security and market data                                       ║
║    3. Estimate normal returns (market model, FF, etc.)                       ║
║    4. Calculate abnormal returns                                             ║
║    5. Aggregate across securities                                            ║
║    6. Test for significance                                                  ║
║    7. Cross-sectional analysis                                               ║
║    8. Robustness checks                                                      ║
║                                                                              ║
║  BEST PRACTICES:                                                             ║
║    ✓ Verify event dates from multiple sources                                ║
║    ✓ Use multiple test statistics                                            ║
║    ✓ Report multiple event windows                                           ║
║    ✓ Check robustness to model specification                                 ║
║    ✓ Address event clustering                                                ║
║    ✓ Consider economic significance                                          ║
║                                                                              ║
║  COMMON APPLICATIONS:                                                        ║
║    • Earnings announcements                                                  ║
║    • M&A announcements                                                       ║
║    • Dividend changes                                                        ║
║    • Stock splits                                                            ║
║    • Regulatory/policy changes                                               ║
║    • Management changes                                                      ║
║    • Credit rating changes                                                   ║
║    • Index additions/deletions                                               ║
║                                                                              ║
╚══════════════════════════════════════════════════════════════════════════════╝
""")

Further Reading and Resources

Essential Papers

  1. Foundations

    • Fama, E., Fisher, L., Jensen, M., & Roll, R. (1969). The adjustment of stock prices to new information. International Economic Review, 10(1), 1-21.

    • MacKinlay, A. C. (1997). Event studies in economics and finance. Journal of Economic Literature, 35(1), 13-39.

  2. Test Statistics

    • Patell, J. M. (1976). Corporate forecasts of earnings per share and stock price behavior: Empirical tests. Journal of Accounting Research, 14(2), 246-276.

    • Boehmer, E., Musumeci, J., & Poulsen, A. (1991). Event-study methodology under conditions of event-induced variance. Journal of Financial Economics, 30(2), 253-272.

    • Kolari, J. W., & Pynnönen, S. (2010). Event study testing with cross-sectional correlation of abnormal returns. Review of Financial Studies, 23(11), 3996-4025.

  3. Non-Parametric Methods

    • Corrado, C. J. (1989). A nonparametric test for abnormal security-price performance in event studies. Journal of Financial Economics, 23(2), 385-395.

    • Cowan, A. R. (1992). Nonparametric event study tests. Review of Quantitative Finance and Accounting, 2(4), 343-358.

  4. Long-Horizon Studies

    • Barber, B. M., & Lyon, J. D. (1997). Detecting long-run abnormal stock returns: The empirical power and specification of test statistics. Journal of Financial Economics, 43(3), 341-372.

    • Lyon, J. D., Barber, B. M., & Tsai, C. L. (1999). Improved methods for tests of long-run abnormal stock returns. Journal of Finance, 54(1), 165-201.

  5. Comprehensive Review

    • Kothari, S. P., & Warner, J. B. (2007). Econometrics of event studies. In Handbook of Corporate Finance, Volume 1, 3-36.

Textbooks

  • Campbell, J. Y., Lo, A. W., & MacKinlay, A. C. (1997). The Econometrics of Financial Markets. Princeton University Press.

  • Benninga, S. (2014). Financial Modeling. MIT Press.

Source
# Display-only closing cell: prints a fixed Unicode banner with course
# takeaways. The literal is runtime output and is left exactly as authored.
print("""
╔══════════════════════════════════════════════════════════════════════════════╗
║                                                                              ║
║              CONGRATULATIONS ON COMPLETING THE SUMMER SCHOOL!               ║
║                                                                              ║
║        You now have the tools and knowledge to conduct rigorous             ║
║        event studies for academic research and practical applications.      ║
║                                                                              ║
║        Key Takeaways:                                                        ║
║        • Event studies measure market reaction to information               ║
║        • Multiple test statistics increase robustness                       ║
║        • Design choices matter - be transparent and check robustness        ║
║        • Always consider both statistical and economic significance         ║
║                                                                              ║
║        Good luck with your research!                                        ║
║                                                                              ║
╚══════════════════════════════════════════════════════════════════════════════╝
""")