#!/usr/bin/perl
#----------------------------------------------------------------------
#	This software is in the public domain, furnished "as is", without
#	technical support, and with no warranty, express or implied, as
#	to its usefulness for any purpose.
#
#	Original author: Danny Mangosing <daniel.c.mangosing@nasa.gov>
#
#	$HeadURL:$
#	$Rev:$
#	$Author:$
#	$Date:$
#
#	DESCRIPTION:
#		The script checks the default input directory for files
#		matching a specific regex, moves file into a processing
#		directory and processes the file contents into a relational
#		database
#----------------------------------------------------------------------

=head1 NAME

process_g3b_planning.pl - Find new files, stage in process directory, and parse

=head1 SYNOPSIS

process_g3b_planning.pl

[B<-production>]

[B<-stage>]

[B<-renew>]

[B<-dry-run>]

[B<-ignore-prior-process>]

[B<-dir> I<directory>]

[B<-dir-file> I<filename>]

[B<-f> I<filename>]

[B<-use-sql-import>]

[B<-die-after> I<N>]

[B<-help>|B<-h>]

[B<-version>|B<-v>]

=head1 DESCRIPTION

The B<Process G3B Planning> script traverses through a specified directory tree
locating files prefixed with "AllEvents_SUT_". When a file is found, it is parsed
according to a known contextual structure. The parsed metadata information is
ingested into a relational database system (*SQL), making it possible to generate
more sophisticated spatial and temporal queries.

Querying the metadata database will result in output that would satisfy
the constraint criteria that a potential user would employ.

=head1 OPTIONS

=over 4

=item B<-production>

Update the production database

=item B<-stage>

Only move files to the designated processing directory

=item B<-renew>

Purge the existing database and re-import. The default is to update
existing records in the database.

=item B<-dry-run>

Show what would have been ingested.

=item B<-ignore-prior-process>

Do not perform checking to determine if the input file has been
processed previously.

=item B<-dir>

Specify a directory in which to process files.

=item B<-dir-file> I<filename>

File containing directories to be processed.

=item B<-f> I<filename>

Import a specified input file.

=item B<-use-sql-import>

Create a SQL import file rather than direct update. Output file is
named I<import.sql>.

=item B<-die-after> I<N>

Die after importing I<n> records.

=item B<-help> | B<-h>

Print man page.

=item B<-version> | B<-v>

Print release version.

=back

=head1 AUTHOR

Danny Mangosing/SSAI

Science Directorate at NASA Langley Research Center, Mail Stop 475,
Hampton, VA 23681, Phone: 757-951-1628, E-mail: daniel.c.mangosing@nasa.gov

=head1 CREATED

01/25/2016

=head1 UPGRADED

12/06/2013

Disposition

=cut

#######################################################################

# Include modules
#use strict;
use Cwd;
use File::Find;
use Getopt::Long;
use File::Basename;
use File::Spec;
use Time::Local;
use Math::Trig;
use DBI;

# Constants
use constant EARTH_RADIUS => 6378.137;	# Earth equatorial radius in km (spherical approximation)
# Days-per-month lookup, indexed 1..12 (element 0 unused);
# row 0 = non-leap year, row 1 = leap year.
# NOTE(review): @daytab appears unused within this file — confirm before removing.
@daytab = (
    [ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ],
    [ 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
);
# Map of lowercase three-letter month abbreviations to month numbers 1..12
# (used when normalizing the "DD Mon YYYY" date strings in parse_file).
%mon2num = qw(
	jan 1 feb 2 mar 3 apr 4 may 5 jun 6
	jul 7 aug 8 sep 9 oct 10 nov 11 dec 12
);

# Name of this script with any leading path stripped
my $script_name = basename($0, "");
# NOTE(review): rel2abs() is called with no argument, so this resolves the
# current working directory rather than the script's own directory — confirm
# intent, since $exec_path is later used to place import.sql.
my $exec_path = File::Spec->rel2abs();
$exec_path = $exec_path."/" unless ($exec_path =~ /.*\/$/);	# ensure trailing slash

# Plain-text usage summary printed on bad options or -help
my $usage = '
Usage: '.$script_name.' [options]
Options:

-production		Update the production database

-stage			Only move files to the designated processing directory

-renew			Purge the existing database and re-import

-dry-run		Show what would have been ingested

-ignore-prior-process	Ignore if file has been processed before

-dir directory		Set the root directory in which to process input files

-dir-file filename	File containing directories to be processed.

-f filename		Import a specified input file

-use-sql-import		Create a SQL import file rather than direct update

-die-after n		Die after importing n records

-help
-h			Print this man page.

-version
-v			Release version.
';

# Determine options
# Get command line options
# Each bare spec stores its value in a package variable named $opt_<name>
# (hyphens become underscores), e.g. "dry-run!" sets $opt_dry_run.
# NOTE(review): no "version!"/"v!" specs are declared even though the POD and
# the version-handling code below reference -version/-v; GetOptions will
# reject those switches and fall into this usage branch — confirm intent.
unless (&GetOptions(
	"production!",
	"stage!",
	"renew!",
	"dry-run!",
	"ignore-prior-process!",
	"dir=s",
	"dir-file=s",
	"f=s",
	"use-sql-import!",
	"die-after=s",
	"h!",
	"help!")) {

	print("$usage");
	exit;
}

my $version = '@(#)$Revision$';

# Path to the key=value database configuration properties file, relative to
# the directory the script is launched from.
my $configPropFile = "../cfg/sage_db_config.properties";

# Load the configuration properties into %CFG, stripping comments, blank
# lines, surrounding whitespace, and the "SAGE." key prefix.
# (Improved: three-arg open with a lexical filehandle instead of the old
# two-arg bareword form, and the OS error is included in the die message.)
my %CFG = ();
open(my $config_fh, '<', $configPropFile)
	or die "Cannot open the properties file: $configPropFile: $!";
while (<$config_fh>) {
	chomp;					# no newline
	s/#.*//;				# no comments
	s/^\s+//;				# no leading white
	s/\s+$//;				# no trailing white
	next unless length;		# anything left?

	s/SAGE\.(.*)/$1/;		# remove the 'SAGE.' prefix from the variable names
	my ($var, $value) = split(/\s*=\s*/, $_, 2);

	$CFG{$var} = $value;
}
close($config_fh);

# Build the DBI DSN: -production selects the production host, otherwise the
# development host; database name, hosts, port and credentials come from %CFG.
my $database_connection =
	$opt_production ?
		"DBI:mysql:dbname=".$CFG{'val_db'}.";host=".$CFG{'val_prd'}.";port=".$CFG{'val_port'} :
		"DBI:mysql:dbname=".$CFG{'val_db'}.";host=".$CFG{'val_dev'}.";port=".$CFG{'val_port'};
my $database_account = $CFG{'val_usr'};
my $database_password = $CFG{'val_pwd'};
my $db_planning_table = 'planning_product';		# The name of metadata table.
my $db_prc_table = 'planning_processed';	# The name of processed metadata table.
# Run-wide statistics shared with the processing subs below
our ($total_seconds, $file_counter, $global_updated_records, $global_new_records);

# Process any run-time options
if ($opt_h || $opt_help) {
	$opt_h = $opt_help = 0;
	print("$usage");
	exit;
}
# NOTE(review): $opt_v/$opt_version are never set because GetOptions above
# declares no "version"/"v" specs, so this branch looks unreachable — verify.
if ($opt_v || $opt_version) {
	$opt_v = $opt_version = 0;
	print("$version");
	exit;
}

# Root of the default input directory tree; may be overridden with -dir
my $root_dir = "/sage3/std-repository/g3b/data/level0/in";
$root_dir = $opt_dir if ($opt_dir);

# OK already, on with it...
print "\n##########\n".getTimestamp()." ".$script_name."\n";

# Establish a handle to the database
my $dbh = DBI->connect($database_connection,
	$database_account, $database_password,
	{ AutoCommit => 1, RaiseError => 1 })
	or die "Failed to connect to database because $DBI::errstr\n";

# If we're renewing the database table, then we are not
# concerned with the existing data records; purge the table
if ($opt_renew && !defined($opt_dry_run)) {

	print "Clearing planning tables ...\n";

	# Clear out the predicted products table
	$opt_renew = $dbh->do("DELETE FROM $db_planning_table");
	$opt_renew = $dbh->do("DELETE FROM $db_prc_table");
}

# Initialize stats variables
$total_seconds = $global_updated_records = $global_new_records = 0;

# Open the SQL import file, if specified
# NOTE(review): bareword handle and unchecked two-arg open; a failure here
# would only surface later as print errors rather than at open time.
open(SQL_IMPORT, ">$exec_path/import.sql")
	if ($opt_use_sql_import && !defined($opt_dry_run));

# Unbuffer STDOUT so progress messages appear immediately
$| = 1;

@data_dirs = ();

# Process the root at the specified data directories.
# Precedence: -f (single file) over -dir (tree) over -dir-file (list of trees).
if (($opt_f) && (-e $opt_f)) {

	print "Processing file ".$opt_f."\n";

	process_input_file($opt_f);

} elsif ($opt_dir) {

	if (-d $opt_dir) {
		print "Processing files in $opt_dir\n";
		finddepth(\&process_input_directory, $opt_dir);
	}

} elsif ($opt_dir_file) {

	if (-e $opt_dir_file) {

		open(DIR_FILE, $opt_dir_file) ||
			die "ERROR: Can't open $opt_dir_file: $!";

		# Process the ancillary file line by line
		while (<DIR_FILE>) {

			# Get rid of linefeed / carriage return
			# NOTE(review): chop removes the last character unconditionally;
			# a final line lacking a newline would lose its last character.
			chop;

			# Get rid of leading and trailing spaces
			s/^\s*(.*?)\s*$/$1/;

			push @data_dirs, $_;
		}
		close(DIR_FILE);

		# Walk each listed directory that actually exists
		foreach my $process_dir (sort @data_dirs) {
			#print "[$process_dir]\n";

			if (-d $process_dir) {
				print getTimestamp()." Processing files in $process_dir\n";
				finddepth(\&process_input_directory, $process_dir);
			}
		}

	} else {
		print "ERROR: $opt_dir_file does not exist.\n";
	}
} else {
	die "ERROR: a processing directory needs to be specified.\n";
}

# Wrap up: flush the SQL import file (if any) and drop the DB connection
unless (defined($opt_dry_run)) {
	close(SQL_IMPORT) if ($opt_use_sql_import);

	# Now, disconnect from the database
	$dbh->disconnect() or warn "Disconnection failed: $DBI::errstr\n";
}

# Report ingest statistics for this run
printf "%d new records. %d updated records.\n",
	$global_new_records, $global_updated_records;
printf("Done. (%.1f minutes total processing)\n", $total_seconds / 60.0);

exit;

sub getTimestamp {
#======================================================================
#	METHOD:
#		getTimestamp
#
#	ARGUMENTS:
#		none
#
#	DESCRIPTION:
#		Format the current local system time as a timestamp string
#		suitable for log lines and SQL datetime columns.
#
#	RETURNS:
#		timestamp string: YYYY-MM-DD HH:mm:ss
#
# example: "2014-05-19 16:28:55"
#======================================================================

	# Unpack the localtime fields by name rather than by index
	my ($sec, $min, $hour, $mday, $mon, $year) = localtime(time);

	return sprintf(
		"%04d-%02d-%02d %02d:%02d:%02d",
		$year + 1900, $mon + 1, $mday,
		$hour, $min, $sec);
}

sub process_input_directory {
#======================================================================
#	METHOD:
#		process_input_directory
#
#	ARGUMENTS:
#		none	-	invoked by File::Find as the "wanted" callback;
#				the candidate pathname comes from $File::Find::name
#
#	DESCRIPTION:
#		File::Find "wanted" callback.  If the current candidate is a
#		plain, non-empty file whose name matches the planning-aid
#		pattern (AllEvents_SUT*.csv), hand it to process_input_file()
#		for parsing and database ingest.
#
#	RETURNS:
#		0 (the return value is not used by File::Find)
#======================================================================
#	(Fix: removed the empty "()" prototype from the declaration; it
#	falsely advertised a zero-argument function and served no purpose.)

	my $fullpath = $File::Find::name;

	# Only process plain, non-empty planning-aid CSV files
	if ((-f $fullpath) && ($fullpath =~ /AllEvents_SUT.*csv$/) && (-s $fullpath > 0)) {

		process_input_file($fullpath);
	}

	return 0;
}

sub process_input_file {
#======================================================================
#	METHOD:
#		process_input_file
#
#	ARGUMENTS:
#		filename	-	Candidate filename to be processed
#
#	DESCRIPTION:
#		Given the full pathname to an input file, skip it if it was
#		already ingested (see was_processed), otherwise parse it into
#		the planning table and record the file in the processed-files
#		table so later runs will skip it.
#
#	RETURNS:
#		None
#======================================================================
#	(Fixes: removed the empty "()" prototype — this sub is called with
#	an argument; corrected the method name in the header comment; made
#	the working variables lexical instead of package globals.)

	my $filename = shift;

	# DEBUG
	print getTimestamp()."\tDEBUG: Processing [".$filename."] ... \n";

	# Determine if file has already been processed
	unless (was_processed($filename)) {

		# Mark the time
		my $mark0 = time;

		# Parse the file
		if (parse_file($filename)) {

			# Accumulate per-file wall-clock time into the run total
			my $mark1 = time;
			my $process_seconds = $mark1 - $mark0;
			$total_seconds += $process_seconds;

			# Record this file (directory, basename, timestamp) in the
			# processed-files table
			my $sql_stmt = "INSERT INTO $db_prc_table VALUES ('%s','%s','%s')";
			my $curr_datetime = getTimestamp();
			my $dirname = dirname($filename);

			my $do_stmt = sprintf($sql_stmt,
				$dirname,
				basename($filename),
				$curr_datetime);

			unless (defined($opt_dry_run)) {
				# NOTE(review): DbiError is not defined in this file —
				# confirm it is provided elsewhere before relying on it.
				my $rows_affected = $dbh->do($do_stmt) ||
					DbiError($dbh->err, $dbh->errstr);
			}

			printf("%s Processed: %s (%.f seconds)\n", $curr_datetime,
				$filename, $process_seconds);

		} else {
			print "\tThere was a problem processing $filename. Please debug.";
		}
	}
}

sub was_processed {
#======================================================================
#	METHOD:
#		was_processed
#
#	ARGUMENTS:
#		planning_aid_filename	-	Candidate file to be processed
#
#	DESCRIPTION:
#		For a given planning-aid file, determine if it has been
#		previously processed by counting matching rows in the
#		processed-files table.  The check is skipped entirely when
#		-ignore-prior-process is given.
#
#	RETURNS:
#		Zero value indicates that file has not been processed
#		previously; non-zero indicates that it has been processed
#		before
#======================================================================
#	(Fixes: lexical "my" instead of "local"; the filename is now bound
#	as a placeholder instead of being interpolated into the SQL text,
#	which avoids quoting breakage and SQL injection.)

	my ($planning_aid_filename) = @_;
	my $processed = 0;

	unless (defined($opt_ignore_prior_process)) {
		my $sql_stmt = qq(SELECT COUNT(*) FROM $db_prc_table WHERE planning_aid_filename=?);
		my $sth = $dbh->prepare($sql_stmt);
		$sth->execute($planning_aid_filename);
		$processed = $sth->fetchrow_array;
		$sth->finish();
	}

	return $processed;
}

sub parse_file {
#======================================================================
#	METHOD:
#		parse_file
#
#	ARGUMENTS:
#		pathname	-	Full pathname to input file to be parsed
#
#	DESCRIPTION:
#		Given the full pathname to a planning-aid CSV file, parse each
#		event record line, derive the cycle/event ID tags and event
#		type code, and insert (or update) the corresponding row in the
#		planning product table — either directly through DBI, into the
#		SQL import file, or only printed when -dry-run is given.
#
#	RETURNS:
#		1 on completion
#======================================================================
#	(Fix: the SQL-import branch previously tested $opt_use_mysql_import
#	— a variable never set, since the declared option is -use-sql-import
#	— and printed to the never-opened MYSQL_IMPORT handle; it now tests
#	$opt_use_sql_import and writes to SQL_IMPORT, the handle actually
#	opened by the main driver.)

	my ($pathname) = @_;
	my ($line_count) = 0;

	# Open file for processing
	open(INPUT_FILE, $pathname) ||
		die "ERROR: Can't open $pathname: $!";

	$planning_aid_filename = basename($pathname);

	# Process the ancillary file line by line
	while (<INPUT_FILE>) {

        chomp;
# NOTE(review): unconditional raw-line dump; looks like leftover debug output
print "[".$_."]\n\n";
        # The line is a valid record if it matches the record pattern
#04 Mar 2018 00:21:41,1204158119,Sunrise,340.4106649341232,-19.50160328099562,44.69831320426339,94.04336648230014,-18.42446292680723,-5.4410780908461,153.78537059722169,89.99753316749192,398.3263599550752,10216,None,N/A
        if (/^[0-9]{2}\s+\w+\s+[0-9]{4}\s+[0-9]{2}(:[0-9]{2}){2}(\.[0-9]+)*,\s*[0-9]+,\s*\w+(,\s*[-+]*[0-9]+(\.[0-9]+)*)+(,\w+,[\w\s\/]+)*$/) {
            # Parse the input line; field order matches the CSV header:
#Tangent Time-(UTC),Tangent Time-(GPS),Event Type,Azimuth (deg),Elevation (deg),Latitude (deg),Longitude (deg),Solar Beta Angle (deg),Lunar Beta Angle (deg),Lunar Phase Angle (deg),Solar Zenith (deg),ISS Detic Altitude (km),ISS Pass Number,Obscuration Likelyhood,Notes
#12 Jun 2018 00:26:38,1212798416,Sunset,190.8736107316476,-19.443209875331025,33.40998424384666,-80.35334338657775,-10.246510573121567,0.9998926387002455,25.358140161954232,89.99758404821478,399.8898350904258,11771,None,N/A
            ($date_str, $gps_time, $evtype_str, $tp_az, $tp_el, $tp_lat, $tp_lon,
                $solar_beta_angle, $lunar_beta_angle, $lphase, $tp_sza,
				$sc_alt, $orbit, $obsc_str, $obsc_notes) = split(",", $_);

            # Normalize date string ("DD Mon YYYY HH:MM:SS[.fff]") for SQL
			($pday, $mon_str, $pyear, $ptimestr) = split(" ", $date_str);
			$pmon = $mon2num { lc substr($mon_str, 0, 3) };

			print getTimestamp()."\tDEBUG: [".$ptimestr,"]\n";
			if ($ptimestr =~ /[0-9]{2}(:[0-9]{2}){2}\.[0-9]+/) {

				# Split HH:MM:SS.fff into the whole-second time and the
				# fractional-seconds remainder
				$ptime = $tp_fracsecs = $ptimestr;
				$ptime =~ s/([0-9]{2}(:[0-9]{2}){2})(.*)/$1/;
				$tp_fracsecs =~ s/([0-9]{2}:[0-9]{2}:[0-9]{2})(.[0-9]+)/$2/;

			} else {
				$ptime = $ptimestr;
				$tp_fracsecs = 0.0;
			}

            $tp_date = sprintf("%04d-%02d-%02d %s",
                $pyear, $pmon, $pday, $ptime);

#			DEBUG:
			print getTimestamp()."\tDEBUG: [".$date_str,"] => ".$tp_date."\n";

            # Map the event-type string onto its numeric code
            $event_type = 0;
			$evtype_str =~ s/^\s+//;
			$evtype_str =~ s/\s+$//;
			$evtype_str  = lc($evtype_str);

            if ($evtype_str =~ /sunrise/) { $event_type = 10; }
            elsif ($evtype_str =~ /sunset/) { $event_type = 20; }
            elsif ($evtype_str =~ /moonrise/) { $event_type = 30; }
            elsif ($evtype_str =~ /moonset/) { $event_type = 40; }
            elsif ($evtype_str =~ /limb/) { $event_type = 50; }

			# Map the obscuration-likelihood string onto its numeric code
			$obscuration = 0;
			$obsc_str =~ s/^\s+//;
			$obsc_str =~ s/\s+$//;
			$obsc_str  = lc($obsc_str);

            if ($obsc_str =~ /low/) { $obscuration = 10; }
            elsif ($obsc_str =~ /medium/) { $obscuration = 20; }
            elsif ($obsc_str =~ /high/) { $obscuration = 30; }
            elsif ($obsc_str =~ /extreme/) { $obscuration = 40; }
            elsif ($obsc_str =~ /none/) { $obscuration = 0; }

#			DEBUG:
			print getTimestamp()."\tDEBUG: [".$evtype_str,"] => ".$event_type."\n";

			# Calculate the cycle ID tag from the event date
			($tmp_date, $tmp_time) = split(" ", $tp_date);
			($syear, $smon, $sday) = split("-", $tmp_date);
			$event_date = sprintf("%04d%02d%02d", $syear, $smon, $sday);

			$cycle_id_tag = get_cycle_date($event_date, 0);
			$event_id_tag = sprintf("%06d%02d", $orbit, $event_type);

			$visible = 1;

#	+-----------------------+---------------------+---------------------------------------------+
#	| Field                 | Type                |                         					|
#	+-----------------------+---------------------+---------------------------------------------+
#	| cycle_id_tag          | int(10) unsigned    | Cycle ID                					|
#	| event_id_tag          | int(10) unsigned    | Event ID                					|
#	| tp_date               | datetime            | Tangent Time-(UTC)      					|
#	| tp_fracsecs           | double(16,8)        |                         					|
#	| gps_time              | int(10) unsigned    | Tangent Time-(GPS)      					|
#	| event_type            | tinyint(3) unsigned | Event Type              					|
#	| tp_az                 | double(16,8)        | Azimuth (deg)           					|
#	| tp_el                 | double(16,8)        | Elevation (deg)         					|
#	| tp_lat                | double(16,8)        | Latitude (deg)          					|
#	| tp_lon                | double(16,8)        | Longitude (deg)         					|
#	| solar_beta_angle      | double(16,8)        | Solar Beta Angle (deg)  					|
#	| lunar_beta_angle      | double(16,8)        | Lunar Beta Angle (deg)  					|
#	| lphase                | double(16,8)        | Lunar Phase Angle (deg) 					|
#	| tp_sza                | double(16,8)        | Solar Zenith (deg)      					|
#	| sc_alt                | double(16,8)        | ISS Detic Altitude (km)						|
#	| orbit                 | int(10) unsigned    | ISS Pass Number								|
#	| obscuration           | tinyint(1) unsigned | Obscuration None|Low|Medium|High|Extreme	|
#	| notes                 | text                | Obscuration notes							|
#	| visible               | tinyint(1) unsigned | Search visibility 0=hidden 1=visible		|
#	| planning_aid_filename | text                | Planning aid file name						|
#	+-----------------------+---------------------+---------------------------------------------+

			if ($opt_dry_run) {

				# Dry run: print the would-be INSERT, touch nothing
				printf "INSERT INTO $db_planning_table VALUES (%d, %d, '%s', %.8f, %d, %d, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %d, %d, '%s', %d, '%s')\n",
					$cycle_id_tag,
					$event_id_tag,
					$tp_date,
					$tp_fracsecs,
					$gps_time,
					$event_type,
					$tp_az,
					$tp_el,
					$tp_lat,
					$tp_lon,
					$solar_beta_angle,
					$lunar_beta_angle,
					$lphase,
					$tp_sza,
					$sc_alt,
					$orbit,
					$obscuration,
					$obsc_notes,
					$visible,
					$planning_aid_filename;

				$global_new_records++;

			} else {

				# Check to see if event ID already exists in the
				# database table; choose UPDATE vs INSERT accordingly
				$update_record = 0;
				$sql_stmt = qq(SELECT COUNT(*) FROM $db_planning_table WHERE event_id_tag=$event_id_tag);
				$sth = $dbh->prepare($sql_stmt);
				$sth->execute;
				$update_record = $sth->fetchrow_array;

				if ($update_record > 0) {
					$sql_stmt = "UPDATE $db_planning_table SET cycle_id_tag=%d, event_id_tag=%d, tp_date='%s', tp_fracsecs=%.8f, gps_time=%d, event_type=%d, tp_az=%.8f, tp_el=%.8f, tp_lat=%.8f, tp_lon=%.8f, solar_beta_angle=%.8f, lunar_beta_angle=%.8f, lphase=%.3f, tp_sza=%.8f, sc_alt=%.8f, orbit=%d, obscuration=%d, notes='%s', visible=%d, planning_aid_filename='%s' WHERE event_id_tag=$event_id_tag";
					$global_updated_records++;
				} else {
					$sql_stmt = "INSERT INTO $db_planning_table VALUES (%d, %d, '%s', %.8f, %d, %d, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %d, %d, '%s', %d, '%s')";
					$global_new_records++;
				}


				if ($opt_use_sql_import) {

					# Write the statement to the SQL import file opened by
					# the main driver instead of executing it directly.
					# (BUG FIX: was $opt_use_mysql_import / MYSQL_IMPORT,
					# so this branch could never run.)
					printf SQL_IMPORT $sql_stmt."\n",
						$cycle_id_tag,
						$event_id_tag,
						$tp_date,
						$tp_fracsecs,
						$gps_time,
						$event_type,
						$tp_az,
						$tp_el,
						$tp_lat,
						$tp_lon,
						$solar_beta_angle,
						$lunar_beta_angle,
						$lphase,
						$tp_sza,
						$sc_alt,
						$orbit,
						$obscuration,
						$obsc_notes,
						$visible,
						$planning_aid_filename;

				} else {

					$do_stmt = sprintf($sql_stmt,
						$cycle_id_tag,
						$event_id_tag,
						$tp_date,
						$tp_fracsecs,
						$gps_time,
						$event_type,
						$tp_az,
						$tp_el,
						$tp_lat,
						$tp_lon,
						$solar_beta_angle,
						$lunar_beta_angle,
						$lphase,
						$tp_sza,
						$sc_alt,
						$orbit,
						$obscuration,
						$obsc_notes,
						$visible,
						$planning_aid_filename);

print "DEBUG: [".$do_stmt."]\n";

					$rows_affected = $dbh->do($do_stmt) || DbiError($dbh->err, $dbh->errstr);
				}

				$sth->finish();
            }

			$line_count++;

			# Honor -die-after N: stop after importing N records
			if ($opt_die_after) {

				if ($line_count > $opt_die_after) {

					close(SQL_IMPORT) if ($opt_use_sql_import);
					die;
				}
			}
        }
    }

	close(INPUT_FILE);

    return 1;
}

sub interpolate_latlonalt {
#======================================================================
#	METHOD:
#		interpolate_latlonalt
#
#	ARGUMENTS:
#		lat0, lat1	-	Start and end latitude pair
#		lon0, lon1	-	Start and end longitude pair
#		alt0, alt1	-	Start and end altitude pair
#		numint		-	Number of intermediate points to be generated
#
#	DESCRIPTION:
#		Given two geodetic positions (lat/lon/alt), convert each to a
#		Cartesian vector, linearly interpolate each (x, y, z)
#		component, and convert the interpolated vectors back, yielding
#		numint+1 positions spanning the two inputs.
#
#	RETURNS:
#		References to three new arrays: latitude, longitude, altitude
#======================================================================
#	(Fixes: removed the empty "()" prototype — this sub takes seven
#	arguments; made all working variables lexical instead of package
#	globals.)

	my ($lat0, $lat1, $lon0, $lon1, $alt0, $alt1, $numint) = @_;

	# Convert each endpoint lat/lon/alt to a Cartesian vector
	# (note lla2xyz takes its arguments in lon, lat, alt order)
	my ($x0, $y0, $z0) = lla2xyz($lon0, $lat0, $alt0);
	my ($x1, $y1, $z1) = lla2xyz($lon1, $lat1, $alt1);

	# Linearly interpolate each component of the vector
	my @newx = linear_interp($x0, $x1, $numint);
	my @newy = linear_interp($y0, $y1, $numint);
	my @newz = linear_interp($z0, $z1, $numint);

	my (@newlon, @newlat, @newalt) = ();

	# Convert each interpolated vector back to lon/lat/alt
	for (my $i = 0; $i <= $numint; $i++) {

		my ($tmplon, $tmplat, $tmpalt) =
			xyz2lla($newx[$i], $newy[$i], $newz[$i]);

		push @newlon, $tmplon;
		push @newlat, $tmplat;
		push @newalt, $tmpalt;
	}

	return (\@newlon, \@newlat, \@newalt);
}

sub lla2xyz {
#======================================================================
#	METHOD:
#		lla2xyz
#
#	ARGUMENTS:
#		lon, lat, alt	-	Input longitude, latitude and altitude
#						(degrees, degrees, km) — note lon-first order
#
#	DESCRIPTION:
#		Convert lon/lat/alt (LLA) to a Cartesian component vector
#		(XYZ) using a spherical Earth of radius EARTH_RADIUS.
#
#	RETURNS:
#		Returns X, Y, Z components of the LLA vector (km)
#======================================================================
#	(Fixes: removed the empty "()" prototype — this sub takes three
#	arguments; intermediate values are now lexical instead of package
#	globals.)

	my ($lon, $lat, $alt) = @_;

	my $rlon = deg2rad($lon);
	my $rlat = deg2rad($lat);
	my $radius = $alt + EARTH_RADIUS;	# distance from Earth's center

	my $coslat = cos($rlat);
	my $x = $radius * $coslat * cos($rlon);
	my $y = $radius * $coslat * sin($rlon);
	my $z = $radius * sin($rlat);

	return $x, $y, $z;
}

sub xyz2lla {
#======================================================================
#	METHOD:
#		xyz2lla
#
#	ARGUMENTS:
#		x, y, z	-	X, Y, Z component vectors
#
#	DESCRIPTION:
#		Converts XYZ component vectors into longitude, latitude and
#		altitude (LLA); inverse of lla2xyz.
#
#	RETURNS:
#		Returns longitude, latitude and altitude representation of
#		the input component vector
#======================================================================
#	(Fixes: removed the empty "()" prototype.  BUG FIX: the original
#	assigned asin(z/r) to $lon and atan2(y,x) to $lat — i.e. latitude
#	and longitude were swapped relative to the forward transform in
#	lla2xyz (x = r*cos(lat)*cos(lon), z = r*sin(lat)).  Latitude is
#	asin(z/r) and longitude is atan2(y,x), as returned below.)

	my ($x, $y, $z) = @_;

	my $rad = sqrt($x**2 + $y**2 + $z**2);
	my $lat = rad2deg(asin($z / $rad));
	my $lon = rad2deg(atan2($y, $x));
	my $alt = $rad - EARTH_RADIUS;

	return $lon, $lat, $alt;
}

sub linear_interp {
#======================================================================
#	METHOD:
#		linear_interp
#
#	ARGUMENTS:
#		p0, p1	-	Two points, P0 and P1, to interpolate
#		steps	-	Number of steps to interpolate between points
#
#	DESCRIPTION:
#		Given two points, P0 and P1, linearly interpolate between
#		the two by a given number of steps
#
#	RETURNS:
#		Array of points from P0 to P1 inclusive.  For backward
#		compatibility the returned array still ends with P1 twice
#		(steps+2 elements): the loop's final point already reaches P1
#		and P1 is then appended as the exact endpoint; existing
#		callers only read the first steps+1 elements.
#======================================================================
#	(Fixes: removed the empty "()" prototype — this sub takes three
#	arguments; guarded against a division by zero when steps <= 0.)

	my ($p0, $p1, $steps) = @_;

	# Degenerate request: no interpolation possible, just the endpoints
	return ($p0, $p1) if (!$steps || $steps <= 0);

	my $inc = abs($p1 - $p0) / $steps;
	my @points = ();

	my $p = $p0;
	push @points, $p;

	# Step from P0 toward P1, one increment at a time
	while ($steps--) {
		$p = ($p1 > $p0) ? ($p + $inc) : ($p - $inc);
		push @points, $p;
	}
	push @points, $p1;	# append the exact endpoint (see RETURNS note)

	return @points;
}

#-----------------------------------------------------------------
#    get_cycle_date --
#
#    Given a date as a YYYYMMDD string and a designated day of the
#    week (0=Sunday .. 6=Saturday), return the YYYYMMDD date of the
#    most recent occurrence of that weekday on or before the input
#    date (UTC).  Used to derive the cycle ID tag for an event date.
#
#    (Fixes: all working variables are now lexical instead of
#    package globals; the unused $dd variable was removed.)
#-----------------------------------------------------------------
sub get_cycle_date
{
    my ($in_date) = $_[0];
    my ($designated_dow) = $_[1];

    # Convert YYYYMMDD to YYYY-MM-DD, then to epoch seconds at 00:00 UTC.
    # Time::Local interprets years 100..999 as offsets from 1900, so
    # $inyear - 1900 (e.g. 118 for 2018) resolves to the intended year.
    $in_date =~ s/(\d{4})(\d{2})(\d{2})/$1-$2-$3/;
    my ($inyear, $inmonth, $inday) = split("-", $in_date);
    my $input_date = timegm(0, 0, 0, $inday, $inmonth - 1, $inyear - 1900);

    # Determine the day of the week for the input date (0=Sunday)
    my $wday = (gmtime($input_date))[6];

    # If the current day is less than the designated day of the
    # week, then we need to go back into the previous week to find
    # the designated day. Otherwise, the difference between the
    # current day and the designated day is the number of days to
    # step back within the current week.
    my $delta_days = ($wday < $designated_dow) ? (7 - $designated_dow + $wday) :
        ($wday - $designated_dow);

    # Convert the day offset to seconds and step back to the cycle date
    $delta_days *= 86400;

    my $cytime = $input_date - $delta_days;

    # Convert back to a YYYYMMDD date string
    my ($cymday, $cymon, $cyyear) = (gmtime($cytime))[3, 4, 5];

    $cyyear += 1900;
    $cymon++;

    my $cy_string = sprintf("%04d%02d%02d", $cyyear, $cymon, $cymday);

    return ($cy_string);
}
