import rngStream from scipy.stats import norm # normal distribution for CIs and e_gain calc from argparse import ArgumentParser from math import sqrt, log, exp, sin, cos, pi # Simulates the option model of Assignment #1 # # CS 590M: Introduction to Simulation # # Usage: python option.py -n k [-t -d m -p a] # where -n (--num) k: Number of replications given by k # -t (--trial): Trial run to get required number of replications # -d (--debug) m: Verbose output needed, levels from 0-2 # -p (--ptype) a: performance measure desired # a=0 - gain for eg policy # a=1 - gain for tmr policy # a=2 - difference in gain (eg - tmr) # # Uses the rngStream module (the module should be in the Python path or # the same folder) __author__ = "Peter Haas" class Estimator: """ Computes point estimates and confidence intervals """ def __init__(self, conf_pct, epsilon, relative): self.epsilon = epsilon # precison of CI self.relative = relative # relative or absolute precision self.k = 0 # number of values processed so far self.sum = 0.0 # running sum of values self.v = 0.0 # running value of (k-1)*variance self.z = norm.ppf(0.5 + (conf_pct / 200)) # CI normal quantile self.conf_str = str(conf_pct) + "%" # string of form "xx%" for xx% CI def reset(self): self.k = 0 self.sum = 0 self.v = 0 def process_next_val(self, value): self.k += 1 if self.k > 1: diff = self.sum - (self.k - 1) * value self.v += diff/self.k * diff/(self.k-1) self.sum += value def get_variance(self): return self.v/(self.k-1) if self.k > 1 else 0 def get_mean(self): return self.sum/self.k if self.k > 1 else 0 def get_conf_interval(self): hw = self.z * sqrt(self.get_variance()/self.k) point_est = self.get_mean() c_low = point_est - hw c_high = point_est + hw rstr = "{0} Confidence Interval: [ {1:.4f}, {2:.4f} ] (hw = {3:.4f})" return rstr.format(self.conf_str, c_low, c_high, hw) def get_num_trials(self): var = self.get_variance() if var == 0: return "UNAVAILABLE (Need at least 2 pilot reps)" if self.relative: width = self.get_mean() * self.epsilon rstr = "Est. # of reps for +/- {0:.2f}% accuracy: {1}" acc = 100 * self.epsilon else: width = self.epsilon rstr = "Est. # of reps for +/- {0:.4f} accuracy: {1}" acc = self.epsilon nreps = int(1+(var * self.z * self.z)/(width * width)) return rstr.format(acc, nreps) class Sim_Model: """ Instantiates option model """ def __init__(self, p_type,trial): self.p_type = p_type self.normals = [] # holds normal random variables self.num_normals = 20 # number of normals to store self.init_price = 100 self.strike_price = 100 self.mu = -0.05 self.sigma = 0.3 self.alpha = self.mu + 0.5 * self.sigma * self.sigma self.option_period = 20 self.unigen1 = rngStream.RngStream("uniform1") self.unigen2 = rngStream.RngStream("uniform2") # use different streams for trial runs if trial: for i in range(3): self.unigen1.ResetNextSubstream() self.unigen2.ResetNextSubstream() # choose different rng stream for different performance measures for i in range(int(p_type)): self.unigen1.ResetNextSubstream() self.unigen2.ResetNextSubstream() def do_rep(self, est): """ Computes the gain during a single replication """ day = 0 done_tmr = False done_eg = False done = False price = self.init_price verbose_print(2, "\n") while not done: day += 1 # refill vector of normals if needed if len(self.normals) == 0: i = 1 while i < self.num_normals: u1 = self.unigen1.RandU01() u2 = self.unigen2.RandU01() r = sqrt(-2.0 * log(u1)) theta = 2 * pi * u2; self.normals.append(self.sigma * r * cos(theta) + self.mu) self.normals.append(self.sigma * r * sin(theta) + self.mu) i += 2 # update price xnorm = self.normals.pop() price *= exp(xnorm) verbose_print(2, "{0:2d}: {1:10.2f}".format(day, price)) # check for stopping under tmr policy if (not done_tmr) and (self.p_type > 0): if (price > self.strike_price) or (day == self.option_period): gain_tmr = max(price - self.strike_price, 0) done_tmr = True verbose_print(2, " *TMR") if (self.p_type == 1): done = True # check for stopping under eg policy if (not done_eg) and (self.p_type != 1): if (price > self.strike_price): done_eg = True for i in range(1, self.option_period - day + 1): bi = (i * self.mu - log(self.strike_price / price)) \ / (self.sigma*sqrt(i)) e_gain = price * exp(i * self.alpha) \ * norm.cdf(self.sigma * sqrt(i) + bi) \ - self.strike_price * norm.cdf(bi) if price - self.strike_price <= e_gain: done_eg = False break if day == self.option_period: done_eg = True if done_eg: gain_eg = max(price - self.strike_price, 0) verbose_print(2, " *EG") if self.p_type == 0: done = True # check for overall termination (if p_type == 2) if self.p_type == 2: done = done_tmr and done_eg verbose_print(2, "\n") # end of while loop # compute performance measure and update statistics if self.p_type == 0: perf_meas = gain_eg elif self.p_type == 1: perf_meas = gain_tmr else: perf_meas = gain_eg - gain_tmr est.process_next_val(perf_meas) return perf_meas def verbose_print(level, *args): """ verbose printing for different debug levels """ if level <= int(sysargs.debug): for arg in args: print(arg, end=' ') else: pass if __name__ == "__main__": # parse command line arguments parser = ArgumentParser( description="option -n [--trial --debug m --ptype a]") parser.add_argument('-n', '--num', help="Number of replications", required=True) parser.add_argument('-t', '--trial', help="Trial run to get required # of replications", action='store_true') parser.add_argument('-d', '--debug', help="Set verbose output level", default=0) parser.add_argument('-p', '--p_type', help="Performance measure: 0=eg 1=tmr 2=eg-tmr", default=0) sysargs = parser.parse_args() # ensure that results are reproducible seed0 = [12345, 12345, 12345, 12345, 12345, 12345] rngStream.SetPackageSeed(seed0) # instantiate estimator est = Estimator(95, 0.05, True) # 95% CI with 5% relative precision # Instantiate simulation model sim = Sim_Model(int(sysargs.p_type), sysargs.trial) # run simulation repetitions and collect stats #print("\n") for rep in range(int(sysargs.num)): val = sim.do_rep(est) verbose_print(1, "\nRepetition {0:3d}: {1:.2f}\n".format(rep+1, val)) # print results if sim.p_type == 0: verbose_print(0, "\nGain (eg policy): {:.2f}".format(est.get_mean())) elif sim.p_type == 1: verbose_print(0, "\nGain (tmr policy): {:.2f}".format(est.get_mean())) else: verbose_print(0, "\nGain difference (eg - tmr): {:.2f}".format(est.get_mean())) verbose_print(0, "\n{}\n".format(est.get_conf_interval())) if sysargs.trial: verbose_print(0, "{}".format(est.get_num_trials()))