#!/usr/local/bin/spy

import itertools as it, operator as op, functools as ft
from string import whitespace as spaces
import yaml, pexpect, psutil
import os, sys, stat, pwd, grp




### Helpers inlined from the fgc module, so this script is self-contained
###
# Scalar types that should never be iterated over (py2 str/unicode split).
atomic = str, int, float, unicode

def do_init(data):
	'''Recursively wrap nested dicts into `do` objects, passing any other value through.'''
	if isinstance(data, dict):
		return do( (key, do_init(val)) for key, val in data.iteritems() )
	return data

class do(dict):
	'''DataObject - dict with JS-like key=attr access'''
	def __init__(self, *argz, **kwz):
		# A scalar first argument is treated as a config source to load,
		#  with further arguments merged on top of it.
		# NOTE(review): `_import` is not defined anywhere in this file -
		#  presumably an fgc helper (context manager yielding a mapping); confirm.
		if len(argz) and isinstance(argz[0], atomic):
			with _import(argz[0]) as cfg: super(do, self).__init__(cfg)
			for arg in argz[1:]:
				with _import(arg) as cfg: self.update(cfg)
		else: super(do, self).__init__(*argz, **kwz)
		# Convert nested dicts to `do` as well, so attr access works at any depth.
		for k,v in self.iteritems(): self[k] = do_init(v)
	def __getattr__(self, k): return self[k] # missing key raises KeyError, not AttributeError
	def __setattr__(self, k, v): dict.__setitem__(self, k, do_init(v))

def chain(*argz, **kwz):
	'''Lazily flatten arguments one level deep.

	Scalars (see `atomic`) are yielded as-is, iterables are expanded;
	a value that turns out to be non-iterable is yielded unchanged.
	With nonempty=True, None values (top-level and nested) are skipped.'''
	skip_none = kwz.get('nonempty')
	for arg in argz:
		if skip_none and arg is None:
			continue
		if isinstance(arg, atomic):
			yield arg
			continue
		try:
			for item in arg:
				if not skip_none or item is not None: yield item
		except TypeError:
			yield arg


import logging
def init_log():
	'''Configure the global `log`: a stderr logger with timestamped records.

	Level is DEBUG with --debug, WARNING otherwise. Also attaches a
	`log.fatal(..., crash=N)` helper which logs critical and, when crash
	is truthy, exits the process with that code.'''
	global log
	log = logging.getLogger('master')
	level = logging.DEBUG if optz.debug else logging.WARNING
	log.setLevel(level)
	handler = logging.StreamHandler()
	handler.setFormatter(logging.Formatter(
		'%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s',
		'(%d.%m.%y %H:%M:%S)' ))
	log.addHandler(handler)

	def log_fatal(*argz, **kwz):
		crash = kwz.pop('crash', False)
		log.critical(*argz, **kwz)
		if crash: sys.exit(crash)
	log.fatal = log_fatal


from subprocess import Popen, PIPE
def get_bin(cmd):
	'''Resolve a command name to its absolute path via `which`.

	Raises KeyError when the command cannot be found in PATH.'''
	path = pipe('/usr/bin/which', cmd).read().strip(spaces)
	if not path: raise KeyError(cmd)
	return path
def pipe(*argz, **kwz): # originally just a minor subclass wrapper (fgc.exe.pipe)
	'''Spawn a subprocess with stdin/stdout/stderr piped (overridable via kwz).

	The first argument is the binary; unless given as an absolute path it
	is resolved through get_bin(). Returns the Popen object, augmented
	with read/readline (stdout) and write/writeline (stdin) shortcuts.'''
	if argz[0][0] != '/': argz = chain(get_bin(argz[0]), argz[1:])
	streams = dict(stdin=PIPE, stdout=PIPE, stderr=PIPE)
	streams.update(kwz)
	proc = Popen(argz, **streams)
	if proc.stdout:
		proc.read = proc.stdout.read
		proc.readline = proc.stdout.readline
	if proc.stdin:
		proc.write = proc.stdin.write
		def writeline(self, line, **kwz):
			# Append a newline unless one is already there; the original
			#  line[-1] check raised IndexError for empty lines.
			line = str(line)
			if not line.endswith('\n'): line += '\n'
			self.write(line, **kwz)
			self.stdin.flush()
		proc.writeline = ft.partial(writeline, proc)
	return proc


class Error(OSError):
	'''Generic operation failure; wraps IOError/OSError raised by helpers below.'''
class LockError(OSError):
	'''Inability to acquire lock'''


def touch(path, mode=0o644, tuid=-1, tgid=-1):
	'''Create or truncate a file, then apply mode and ownership.

	tuid/tgid of -1 leave owner/group unchanged (os.chown semantics).
	Raises IOError/OSError on permission or path problems.'''
	# Close the handle explicitly instead of leaking it to the GC.
	open(path, 'w').close()
	os.chmod(path, mode)
	os.chown(path, tuid, tgid)


from time import sleep
import fcntl

class Flock(object):
	'''Advisory filesystem lock on a path via fcntl.flock().

	Exclusive by default, shared with shared=True; usable as a context
	manager. With make=True a missing lock file is created (and removed
	again on garbage collection, unless remove=False is passed).'''
	gc_unlock = True # set False to keep the lock past garbage collection

	@property
	def _type(self):
		# flock(2) operation matching the current lock mode.
		return fcntl.LOCK_EX if not self._shared else fcntl.LOCK_SH

	def __init__(self, path, make=False, shared=False, remove=None, timeout=None):
		self.locked = self._del = False
		if remove is None: remove = make # only auto-remove files we created
		try: self._lock = open(path)
		except (IOError, OSError) as err:
			if not make: raise Error(err)
			touch(path)
			self._lock = open(path)
		if remove: self._del = path
		self._shared = shared
		if timeout is not None: self.acquire(timeout)

	def check(self, grab=False):
		'''Non-blocking probe of lock availability.

		With grab=True: keep the lock on success and return self, None on
		failure. Without: probe-and-release, returning True only when the
		lock is already held by this object, else False.'''
		if self.locked: return self.locked
		try: fcntl.flock(self._lock, self._type | fcntl.LOCK_NB)
		except IOError:
			if not grab: return False
			else: return None # checked internally
		else:
			if grab:
				self.locked = True
				return self
			else:
				fcntl.flock(self._lock, fcntl.LOCK_UN)
				return False

	def acquire(self, timeout=False, interval=5, shared=None, release=False):
		'''Take the lock: blocking without timeout, else polling every
		`interval` seconds up to `timeout` seconds (raising LockError).
		# Lock is not released before re-locking by default:
		#  this way will ensure consistency but WILL
		#  cause deadlock if two scripts will call it on one file.
		# Alternative way is via release arg.'''
		if not self.locked or shared != self._shared:
			if release and self.locked: self.release() # break consistency, avoid deadlocks
			if shared is not None: self._shared = shared # update lock type for all future calls as well
			if not timeout:
				fcntl.flock(self._lock, self._type)
				self.locked = True
			else:
				for attempt in range(0, timeout, int(interval)):
					attempt = self.check(True)
					if attempt: break
					else:
						log.debug('Waiting for lock: %s'%self._lock)
						sleep(interval)
				else: raise LockError('Unable to acquire lock: %s'%self._lock)
		return self

	def release(self):
		'''Drop the lock; best-effort, never raises.'''
		try: fcntl.flock(self._lock, fcntl.LOCK_UN)
		except (IOError, OSError, ValueError): pass # closed fd / already unlocked
		self.locked = False
		return self

	def __del__(self):
		if self.gc_unlock:
			self.release()
			# Original called fgc's rm() here, which is not defined in this
			#  script (NameError on GC) - emulate it as a best-effort unlink.
			if self._del:
				try: os.unlink(self._del)
				except (IOError, OSError): pass

	# NOTE: the original also aliased __hash__ to this lambda, which made
	#  hash() raise TypeError (str result); default identity hash restored.
	__str__ = __repr__ = lambda s: '<FileLock %s>'%s._lock
	def __enter__(self): return self.acquire()
	def __exit__(self, ex_type, ex_val, ex_trace): self.release()
###
### end of statically-linked stuff, real code ahead!




# Two views of our own invocation: realpath (the actual script, symlinks
#  resolved) vs abspath (the name we were invoked by - may be a faked binary).
syn = os.path.realpath(sys.argv[0])
purpose = os.path.abspath(sys.argv[0])
user = os.getenv('USER')
uid, gid = pwd.getpwnam(user).pw_uid, grp.getgrnam(user).gr_gid
auth = (user+'@%s') if user else '%s' # ssh destination template
get_bak = lambda cmd: cmd + '.syn_bak' # backup path for a faked binary

# Config sits next to the resolved script: same basename, .yaml extension.
# NOTE(review): yaml.load without SafeLoader - acceptable only because the
#  config is a trusted local file; never point this at untrusted input.
cfg = do(yaml.load(open(os.path.realpath(sys.argv[0]).rsplit('.')[0] + '.yaml')))

nodes = set(cfg.nodes)
nodes.remove(os.uname()[1].split('.')[0]) # drop self, confirming that it was there
for path in chain(cfg.get('path_ext'), nonempty=True): # it _never_ works right via profiles
	os.putenv('PATH', '%s:%s'
		% (os.path.expanduser(path), os.getenv('PATH')))
# Absolute paths of all controlled binaries; each spec is either a plain
#  value or a dict with an optional 'bin' override (falsy 'bin' = no binary).
cmdz = set(it.ifilter(None, ( (spec.bin and get_bin(spec.bin))
	if isinstance(spec, dict) and 'bin' in spec else get_bin(app)
	for app,spec in cfg.control.iteritems() )))

from optparse import OptionParser

# Command-line interface. When invoked under a faked binary name the
#  arguments belong to that binary, so nothing is parsed at all.
parser = OptionParser(usage='%prog [options]',
	description="Sync application paths between several nodes")
for flags, dest, text in [
		(('-l', '--lock'), 'lock', 'lock current node'),
		(('-s', '--status'), 'status', 'get node status (master / minion)'),
		(('-q', '--query'), 'query', 'show status of all nodes'),
		(('-p', '--prepare'), 'prepare', 'pull changes from master, but dont become one'),
		(('-d', '--debug'), 'debug', 'print lots of debug info') ]:
	parser.add_option(*flags, action='store_true', dest=dest, help=text)
optz,argz = parser.parse_args(sys.argv[1:] if purpose not in cmdz else list())

init_log() # fgc.log.cfg(...)


### Lock: replace every controlled binary with a symlink to this script.
## Flock-guarded for consistency; already-faked binaries only produce warnings.
if optz.lock:
	# Two passes: first flock every controlled binary and verify its backup
	#  slot is free, then swap each one for a symlink to this script.
	actz = list()
	for cmd in cmdz: # check/lock pass
		if os.path.samefile(cmd, sys.argv[0]):
			log.warn('Cmd was faked already: %s'%cmd)
			continue
		lock = Flock(cmd).acquire(10) # fgc.sh.flock (pulled into this script)
		bak = get_bak(cmd)
		if os.path.exists(bak):
			log.fatal('Backup file for cmd (%s) exists: %s'%(cmd, bak), crash=2)
		else: actz.append((cmd, bak, lock))
	while actz: # action
		cmd,bak,lock = actz.pop()
		os.rename(cmd, bak) # fgc.sh.mv in original version,
			# but its implementation is quite bulky to cram in here
		os.symlink(os.path.realpath(sys.argv[0]), cmd) # fgc.sh.ln
		lock.release()
	sys.exit()


### Determine self-status
status = None

# explicit master/minion or undefined
for cmd in cmdz:
	if os.path.samefile(cmd, syn):
		if status is None: status = 'minion'
		elif status != 'minion':
			log.debug('Command is faked: %s'%cmd)
			status = 'undefined'
	else:
		if status is None: status = 'master'
		elif status != 'master':
			log.debug('Command is not faked: %s'%cmd)
			status = 'undefined'

cmd = os.getpid()
for proc in psutil.process_iter(): # conflict if syn-proc is running
	if proc.pid == cmd: continue
	for app,spec in cfg.control.iteritems():
		if 'bin' in spec and not spec.bin: continue # no-bin app
		if app in proc.name or filter(lambda arg: app in arg, proc.cmdline):
			app = ' '.join(proc.cmdline)
			log.debug('Conflicting app: %s'%app)
			status = 'conflict (%s)'%app


## Just echo status
if optz.status:
	print status
	sys.exit()


### Multi-node operation
passdb = dict() # host -> cached ssh password
last_link = None # last pexpect link a password was sent to (detects auth retries)

def link_pass(): # xauth wrapper
	# Temporarily take ownership of ~/.Xauthority so the X-flavored pinentry
	#  can reach the display, restoring uid/gid ownership afterwards.
	# NOTE(review): assumes the process uids can differ from the USER's - confirm.
	try: os.chown(os.path.expanduser('~/.Xauthority'), os.getuid(), os.getgid()) # fgc.sh.chown
	except OSError: return _get_pass() # no xauth data available
	else:
		try: password = _get_pass()
		finally: os.chown(os.path.expanduser('~/.Xauthority'), uid, gid) # fgc.sh.chown
		return password

def _get_pass():
	'''Prompt (via pinentry) for the ssh password of the global `host`.

	Results are cached in passdb; when the same link asks again (meaning
	the previous password failed) the cache is bypassed and a SETERROR
	banner is shown. Falls back to pinentry-curses when X is unavailable.
	Raises KeyboardInterrupt on user abort, RuntimeError on protocol errors.'''
	global host, link, passdb, last_link
	if not last_link is link and host in passdb: return passdb[host]
	log.debug('Asking password for host %s'%host)
	pe = pipe('pinentry')
	def chk_ok():
		# Every assuan command must be answered with an OK line.
		line = pe.readline()
		if 'OK' not in line:
			pe.stdin.close()
			raise RuntimeError, ( 'Pinentry failed - %s'%(line + pe.read()).strip(spaces)
				+ (' // %s'%pe.stderr.read().strip(spaces) if not pe.returncode is None else '') )
	try: chk_ok()
	except RuntimeError: # most likely X connection failed, try curses
		pe = pipe('pinentry-curses')
		chk_ok()
		pe.writeline('OPTION ttyname=%s'%os.ttyname(0)), chk_ok()
		pe.writeline('OPTION ttytype=%s'%(os.getenv('TERM') or 'xterm')), chk_ok()
		pe.writeline('OPTION lc-ctype=en_US.UTF-8'), chk_ok()
	lines = ( 'SETDESC Enter SSH password for %s'%host,
		'SETPROMPT %s'%host,
		'SETTITLE SSH password for %s'%host )
	if last_link is link: pe.writeline('SETERROR SSH auth failed') # same link asking again = auth failed
	else: last_link = link
	for line in lines: pe.writeline(line), chk_ok()
	pe.writeline('GETPIN')
	line = pe.readline()
	if 'OK' in line: line = pe.readline() # GETPIN may ack first, then send the data line
	if 'ERR' in line: raise KeyboardInterrupt, 'Pinentry aborted'
	elif line.startswith('D '):
		passdb[host] = line[2:].strip(spaces)
		return passdb[host]
	else: raise RuntimeError, 'Unrecognized answer: %s'%line


# Dispatch table for pexpect.expect() outcomes on ssh/rsync links:
#  False marks stop/ignore outcomes (EOF, timeout), callables are actions.
link_replies = { pexpect.EOF: False,
	pexpect.TIMEOUT: False,
	'Are you sure you want to continue connecting':
		ft.partial(log.fatal, 'Host key cannot be verified', crash=2),
	'Password:': (lambda: link.sendline(link_pass())) }
query = link_replies.keys() # pattern list; expect() returns an index into it


log.debug('Phase 1: Query nodes to find out which one is (also?) master')
master = (status == 'master') and 'self'
for host in nodes:
	# Run `syn --status` on each remote node, answering prompts as needed.
	link = pexpect.spawn(get_bin('ssh'), [auth%host, '%s --status'%syn])
	while True:
		reply = link_replies[query[link.expect(query)]]
		if not reply: # EOF (or timeout)
			reply = link.before.strip(spaces)
			link.close()
			if not link.exitstatus: break # should be valid answer
			else: log.fatal('Remote error %s: %s'%(link.exitstatus, link.before), crash=2)
		else:
			try: reply()
			except KeyboardInterrupt: log.fatal('User break, aborting', crash=2)
	if optz.query: print '%s: %s'%(host, reply)
	elif reply == 'master':
		if not master: master = host
		else: log.fatal('Duplicate master node: %s, %s'%(master, host), crash=2)
	elif reply != 'minion' and not optz.prepare: log.fatal('Bogus node: %s'%host, crash=2)

if optz.query:
	print 'self: %s'%status
	log.debug('Query stage finished, exiting')
	sys.exit()
elif status == 'master':
	log.debug('Host is undisputed master already')
	sys.exit()
elif status != 'minion': log.fatal('Bogus self-state: %s'%status, crash=2)
elif not master:
	# NOTE(review): relies on `host` leaking from the loop above - with a
	#  single (remote) node and --prepare, that node is assumed to be master.
	if len(nodes) == 1 and optz.prepare: master = host
	else: log.fatal('No master node found: %s'%', '.join(nodes), crash=2)
else: log.debug('Found master: %s'%master)


log.debug('Phase 2: Pull data from master')
syncs = dict() # dst_path -> running rsync pexpect link
for spec in cfg.control.itervalues(): # spawn rsync processes
	spec = chain(spec.get('paths'), nonempty=True) if isinstance(spec, dict) else [spec]
	for path in spec:
		src_path = os.path.expanduser(path) # no realpath resolution is important: both paths could be pointing anywhere
		if os.path.exists(src_path) and os.path.islink(src_path) \
				and stat.S_ISDIR(os.stat(src_path).st_mode):
			# Symlinked dir: sync its contents in place ('src/.' -> 'src/').
			src_path, dst_path = os.path.join(src_path, '.'), os.path.join(src_path, '') # fgc.sh.join
		else: dst_path = os.path.dirname(src_path)
		cmd = ['rsync', '-HaAxXz', '--delete', '%s:%s'%(auth%master, src_path), dst_path]
		log.debug('Sync: %s'%' '.join(cmd))
		syncs[dst_path] = pexpect.spawn(cmd[0], cmd[1:], timeout=1)
while syncs:
	for path,link in syncs.items(): # query/feed processes
		# py2 .items() returns a copy, so deleting from syncs below is safe.
		reply = query[link.expect(query)]
		if reply is pexpect.EOF:
			link.close()
			if not link.exitstatus: log.debug('Completed sync: %s'%path)
			else: log.warn('Sync (%s) crashed (%s): %s'%(path, link.exitstatus, link.before))
			del syncs[path] # whatever
		else:
			act = link_replies[reply]
			if act: act() # timeouts are passed


if optz.prepare:
	log.debug('Preparation stage finished, exiting')
	sys.exit()


log.debug('Phase 3: Promote self to master')
# Restore the real binaries from their .syn_bak backups in place of our fakes.
for cmd in cmdz:
	if not os.path.samefile(cmd, syn): # weird symlink
		log.warn('Cmd was not a fake: %s'%cmd)
		continue
	bak = get_bak(cmd)
	if os.path.exists(bak):
		if os.path.samefile(bak, syn): log.fatal('Fake in place of bak: %s'%bak, crash=2)
		os.unlink(cmd) # fgc.sh.rm
		os.rename(bak, cmd) # fgc.sh.mv
	else: log.fatal('Unable to find real binary (%s) for a fake: %s'%(bak, cmd), crash=2)


log.debug('Phase 4: Turn the rest into minions')
# Tell the old master to fake its own binaries via `syn --lock`.
link = pexpect.spawn('ssh', [auth%master, '%s --lock'%syn])
while True:
	reply = query[link.expect(query)]
	if reply is pexpect.EOF:
		link.close()
		if not link.exitstatus: break # success
		else: log.fatal('Remote error %s: %s'%(link.exitstatus, link.before), crash=2)
	else:
		act = link_replies[reply]
		if act: act()


log.debug('Phase 5: Fullfill the ultimate purpose')
# If we were invoked through a faked binary, drop to the invoking user's
#  uid/gid (best-effort) and exec the now-restored real binary with argv intact.
if purpose in cmdz:
	try:
		os.setgid(gid)
		os.setuid(uid)
	except OSError: pass
	os.execlp(sys.argv[0], *sys.argv)
