#! /usr/bin/perl
#
# mkoutlines
#
# Builds files containing HTML outline lists.  Typically run by mkindex.
#
# This script starts at the current directory (which must be in the
# encyclopedia hierarchy) and runs through the index file (which must
# be up-to-date, so please don't run this by hand), building HTML
# outlines at each level of the hierarchy, and writing them to
# files that can then be included with a server-side include.
# Each index.shtml file has an associated outline file, called
# list_of_children.
#
# The script can best be understood as using two algorithms, running
# in parallel with each other.  The first algorithm runs through
# the index file, finding entries which match a prefix constructed
# from the current directory name.  For matching Title and SubHead
# entries, an appropriate HTML list item is printed.  The depth of
# these entries in the hierarchy is monitored, and various <UL>
# and </UL> tags are printed to make things line up right.
# The second algorithm keeps track of a set of files, each one
# corresponding to a level in the hierarchy.  All open files
# receive each line of output, and as the script moves through
# the index file, output files are opened or closed as necessary.



# $rootdir is a UNIX path leading to the encyclopedia root directory
# $prefix is a URL path leading to the current directory

# Figure out where we are.  The working directory is captured once and
# matched twice: $rootdir is the UNIX path down to the encyclopedia
# root, $prefix is the URL path from the root to the current directory.
$cwd = `pwd`;
(($rootdir) = ($cwd =~ m:^(.*/Connected):))
    || die "This script must be run from within the Connected hierarchy\n";
($prefix) = ($cwd =~ m:(/Connected.*)$:);

# The index is preprocessed through a sed script before being parsed.
$idxfile = "$rootdir/index";
$idxcmd  = "sed -f $rootdir/index.sed $idxfile |";


# depth($url, $type) -- compute the outline depth of an index entry.
#
# The base depth is the number of "/" separators in the URL, i.e. how
# far down the hierarchy the file lives.  SubHead entries sit one
# level below their page's title, so they gain a level; a directory's
# own index page belongs to the level above it, so it loses one.
sub depth {
    local($url, $type) = @_;

    # tr/// in scalar context counts the slashes without changing $url;
    # this replaces the old index()-based counting loop.
    local($depth) = ($url =~ tr:/::);

    $depth ++ if $type eq "SubHead";
    # The dot is escaped: the old /index.s?html$/ also matched names
    # like "indexZshtml".
    $depth -- if $url =~ /index\.s?html$/;

    return $depth;
}



# closefile($depth) -- close the output file registered at level
# $depth, if any, and clear its slot in the global @filearray.
sub closefile {
    local($depth) = @_;

    if ($filearray[$depth] ne "") {
        close($filearray[$depth]);
        $filearray[$depth] = "";
    }
}

# openfile($depth, $filename) -- open $filename for writing and
# register its handle at level $depth, first closing whatever file was
# registered there.  On failure a warning goes to STDERR and the slot
# is left empty.
sub openfile {
    local($depth, $filename) = @_;

    # One symbolic filehandle name per depth level; at most one file
    # is ever open per level, so reusing the name is safe.
    local($filehandle) = "FILE$depth";

    &closefile($depth);

    # Three-argument open: with the old open($fh, ">$filename") a
    # filename beginning with ">" or "|" could change the open mode.
    if (open($filehandle, ">", $filename)) {
        $filearray[$depth] = $filehandle;
    } else {
        print STDERR "Can't open $filename for writing\n";
    }
}

# printthru($limit, $string) -- write $string to every output file
# currently open at levels 0 through $limit.  Empty slots (levels with
# no file open) are skipped.
sub printthru {
    local($limit, $string) = @_;
    local($i);

    for ($i = 0; $i <= $limit; $i ++) {
        if ($filearray[$i] ne "") {
            print {$filearray[$i]} $string;
        }
    }
}

# Main driver: walk the sed-filtered index stream and emit nested <UL>
# lists, fanning every output line out to all currently open per-level
# outline files via &printthru.

$curdepth = 0;		# nesting level the outline is currently at

# NOTE(review): two-argument open on a "cmd |" string runs the command
# through the shell; $rootdir comes from `pwd`, so this assumes the
# working directory path contains no shell metacharacters -- confirm.
open(IDX, $idxcmd);
while (<IDX>) {
    # Only consider entries at or below the current directory.
    # NOTE(review): $prefix is interpolated into the pattern unquoted;
    # assumes the path contains no regex metacharacters.
    if (m:^$prefix:) {
	if (m:\tTitle\t: || m:\tSubHead\t: || m:\tSymLink\t:) {

	    # Index lines are tab-separated: URL, entry type, text.
	    ($url, $type, $text) = split(/\t/, $_);
	    chop($text);	# strip the trailing newline
	    $depth = &depth($url, $type);

	    # Algorithm 1: emit </UL> or <UL> tags until the open-list
	    # nesting matches this entry's depth.  Moving back up also
	    # closes the outline file for each level being left.
	    while ($curdepth > $depth) {
		&closefile($curdepth);
		&printthru($curdepth, "</UL>\n");
		$curdepth --;
	    }
	    while ($curdepth < $depth) {
		&printthru($curdepth, "<UL>\n");
		$curdepth ++;
	    }

	    if ($type eq "Title") {
		&printthru($depth-1, "<LI><A HREF=\"$url\">$text</A>\n");
	    } elsif ($type eq "SymLink") {
		# Now I would _really_ like the index file to be
		# a database, but oh, well, it's just a speed hit, right?

		# For a SymLink entry, $text holds the target URL; grep
		# the raw index for the target's Title line to get a
		# display name.  NOTE(review): $text is interpolated
		# into a shell command unquoted -- assumes index
		# entries contain no shell metacharacters.
		($junk,$junk,$name) = split(/\t/, `grep '^$text\tTitle' $idxfile`);
		chop($name);
		# The "@" after $name is a literal symlink marker (as
		# in ls -F), not an array interpolation.
		&printthru($depth-1, "<LI><A HREF=\"$text\">$name@</A>\n");
	    } else {
		# SubHead: &depth put it one level below its page, so
		# the item only goes to files up through $depth-2.
		&printthru($depth-2, "<LI>$text\n");
	    }

	    # Algorithm 2: each directory's index page starts a fresh
	    # list_of_children outline file at the current level.
	    # NOTE(review): the unescaped "." also matches names like
	    # "indexZshtml" -- presumably harmless here; confirm.
	    if ($type eq "Title" && ($url =~ m:index.s?html$:)) {
		$outlinefile = $url;
		$outlinefile =~ s:index.s?html$:list_of_children:;
		$outlinefile =~ s:$prefix/::;	# make path relative to cwd
		&openfile($curdepth, $outlinefile);
	    }

	}
    }
}

# Wind down: close any lists (and outline files) still open at EOF.
while ($curdepth > 0) {
    &printthru($curdepth-1, "</UL>\n");
    $curdepth --;
    &closefile($curdepth);
}
