/* Analyze file differences for GNU DIFF.

   Copyright (C) 1988, 1989, 1992, 1993, 1994, 1995, 1998, 2001, 2002,
   2004 Free Software Foundation, Inc.

   This file is part of GNU DIFF.

   GNU DIFF is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU DIFF is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.
   If not, write to the Free Software Foundation,
   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

/* The basic algorithm is described in:
   "An O(ND) Difference Algorithm and its Variations", Eugene Myers,
   Algorithmica Vol. 1 No. 2, 1986, pp. 251-266;
   see especially section 4.2, which describes the variation used below.
   Unless the --minimal option is specified, this code uses the TOO_EXPENSIVE
   heuristic, by Paul Eggert, to limit the cost to O(N**1.5 log N)
   at the price of producing suboptimal output for large inputs with
   many differences.

   The basic algorithm was independently discovered as described in:
   "Algorithms for Approximate String Matching", E. Ukkonen,
   Information and Control Vol. 64, 1985, pp. 100-118.  */

#include "diff.h"
#include <cmpbuf.h>
#include <error.h>
#include <file-type.h>
#include <xalloc.h>

static lin *xvec, *yvec;   /* Vectors being compared. */
static lin *fdiag;         /* Vector, indexed by diagonal, containing
                              1 + the X coordinate of the point furthest
                              along the given diagonal in the forward
                              search of the edit matrix. */
static lin *bdiag;         /* Vector, indexed by diagonal, containing
                              the X coordinate of the point furthest
                              along the given diagonal in the backward
                              search of the edit matrix. */
static lin too_expensive;  /* Edit scripts longer than this are too
                              expensive to compute. */

#define SNAKE_LIMIT 20  /* Snakes bigger than this are considered `big'. */

struct partition
{
  lin xmid, ymid;   /* Midpoints of this partition. */
  bool lo_minimal;  /* True if low half will be analyzed minimally. */
  bool hi_minimal;  /* Likewise for high half. */
};

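/* Note on the tables above (illustrative; M and N here stand for the
   undiscarded line counts of file 0 and file 1): FDIAG and BDIAG are
   indexed by diagonal number d = x - y, where x is a line index in
   file 0 and y a line index in file 1, so d runs from -N to M.
   diff_2_files therefore allocates 2 * (M + N + 3) entries and offsets
   both pointers, roughly

       fdiag = block + N + 1;          so fdiag[-N - 1 .. M + 1] is valid
       bdiag = fdiag + (M + N + 3);    and likewise for bdiag

   which is why the negative and off-by-one subscripts such as
   fd[d - 1] and fd[++fmax + 1] in diag below stay in bounds.  */
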
/* Find the midpoint of the shortest edit script for a specified
   portion of the two files.

   Scan from the beginnings of the files, and simultaneously from the ends,
   doing a breadth-first search through the space of edit-sequence.
   When the two searches meet, we have found the midpoint of the shortest
   edit sequence.

   If FIND_MINIMAL is true, find the minimal edit script regardless
   of expense.  Otherwise, if the search is too expensive, use
   heuristics to stop the search and report a suboptimal answer.

   Set PART->(xmid,ymid) to the midpoint (XMID,YMID).  The diagonal number
   XMID - YMID equals the number of inserted lines minus the number
   of deleted lines (counting only lines before the midpoint).

   Set PART->lo_minimal to true iff the minimal edit script for the
   left half of the partition is known; similarly for PART->hi_minimal.

   This function assumes that the first lines of the specified portions
   of the two files do not match, and likewise that the last lines do not
   match.  The caller must trim matching lines from the beginning and end
   of the portions it is going to specify.

   If we return the "wrong" partitions,
   the worst this can do is cause suboptimal diff output.
   It cannot cause incorrect diff output.  */

static void
diag (lin xoff, lin xlim, lin yoff, lin ylim, bool find_minimal,
      struct partition *part)
{
  lin *const fd = fdiag;        /* Give the compiler a chance. */
  lin *const bd = bdiag;        /* Additional help for the compiler. */
  lin const *const xv = xvec;   /* Still more help for the compiler. */
  lin const *const yv = yvec;   /* And more and more . . . */
  lin const dmin = xoff - ylim; /* Minimum valid diagonal. */
  lin const dmax = xlim - yoff; /* Maximum valid diagonal. */
  lin const fmid = xoff - yoff; /* Center diagonal of top-down search. */
  lin const bmid = xlim - ylim; /* Center diagonal of bottom-up search. */
  lin fmin = fmid, fmax = fmid; /* Limits of top-down search. */
  lin bmin = bmid, bmax = bmid; /* Limits of bottom-up search. */
  lin c;                        /* Cost. */
  bool odd = (fmid - bmid) & 1; /* True if southeast corner is on an odd
                                   diagonal with respect to the northwest. */

  fd[fmid] = xoff;
  bd[bmid] = xlim;

  for (c = 1;; ++c)
    {
      lin d;                    /* Active diagonal. */
      bool big_snake = false;

      /* Extend the top-down search by an edit step in each diagonal. */
      fmin > dmin ? fd[--fmin - 1] = -1 : ++fmin;
      fmax < dmax ? fd[++fmax + 1] = -1 : --fmax;
      for (d = fmax; d >= fmin; d -= 2)
        {
          lin x, y, oldx, tlo = fd[d - 1], thi = fd[d + 1];

          if (tlo >= thi)
            x = tlo + 1;
          else
            x = thi;
          oldx = x;
          y = x - d;
          while (x < xlim && y < ylim && xv[x] == yv[y])
            ++x, ++y;
          if (x - oldx > SNAKE_LIMIT)
            big_snake = true;
          fd[d] = x;
          if (odd && bmin <= d && d <= bmax && bd[d] <= x)
            {
              part->xmid = x;
              part->ymid = y;
              part->lo_minimal = part->hi_minimal = true;
              return;
            }
        }

      /* Similarly extend the bottom-up search. */
      bmin > dmin ? bd[--bmin - 1] = LIN_MAX : ++bmin;
      bmax < dmax ? bd[++bmax + 1] = LIN_MAX : --bmax;
      for (d = bmax; d >= bmin; d -= 2)
        {
          lin x, y, oldx, tlo = bd[d - 1], thi = bd[d + 1];

          if (tlo < thi)
            x = tlo;
          else
            x = thi - 1;
          oldx = x;
          y = x - d;
          while (x > xoff && y > yoff && xv[x - 1] == yv[y - 1])
            --x, --y;
          if (oldx - x > SNAKE_LIMIT)
            big_snake = true;
          bd[d] = x;
          if (!odd && fmin <= d && d <= fmax && x <= fd[d])
            {
              part->xmid = x;
              part->ymid = y;
              part->lo_minimal = part->hi_minimal = true;
              return;
            }
        }

      if (find_minimal)
        continue;

      /* Heuristic: check occasionally for a diagonal that has made
         lots of progress compared with the edit distance.
         If we have any such, find the one that has made the most
         progress and return it as if it had succeeded.

         With this heuristic, for files with a constant small density
         of changes, the algorithm is linear in the file size. */

      if (200 < c && big_snake && speed_large_files)
        {
          lin best = 0;

          for (d = fmax; d >= fmin; d -= 2)
            {
              lin dd = d - fmid;
              lin x = fd[d];
              lin y = x - d;
              lin v = (x - xoff) * 2 - dd;
              if (v > 12 * (c + (dd < 0 ? -dd : dd)))
                {
                  if (v > best
                      && xoff + SNAKE_LIMIT <= x && x < xlim
                      && yoff + SNAKE_LIMIT <= y && y < ylim)
                    {
                      /* We have a good enough best diagonal;
                         now insist that it end with a significant snake. */
                      int k;

                      for (k = 1; xv[x - k] == yv[y - k]; k++)
                        if (k == SNAKE_LIMIT)
                          {
                            best = v;
                            part->xmid = x;
                            part->ymid = y;
                            break;
                          }
                    }
                }
            }
          if (best > 0)
            {
              part->lo_minimal = true;
              part->hi_minimal = false;
              return;
            }

          best = 0;
          for (d = bmax; d >= bmin; d -= 2)
            {
              lin dd = d - bmid;
              lin x = bd[d];
              lin y = x - d;
              lin v = (xlim - x) * 2 + dd;
              if (v > 12 * (c + (dd < 0 ? -dd : dd)))
                {
                  if (v > best
                      && xoff < x && x <= xlim - SNAKE_LIMIT
                      && yoff < y && y <= ylim - SNAKE_LIMIT)
                    {
                      /* We have a good enough best diagonal;
                         now insist that it end with a significant snake. */
                      int k;

                      for (k = 0; xv[x + k] == yv[y + k]; k++)
                        if (k == SNAKE_LIMIT - 1)
                          {
                            best = v;
                            part->xmid = x;
                            part->ymid = y;
                            break;
                          }
                    }
                }
            }
          if (best > 0)
            {
              part->lo_minimal = false;
              part->hi_minimal = true;
              return;
            }
        }

      /* Heuristic: if we've gone well beyond the call of duty,
         give up and report halfway between our best results so far. */
      if (c >= too_expensive)
        {
          lin fxybest, fxbest;
          lin bxybest, bxbest;

          fxbest = bxbest = 0;  /* Pacify `gcc -Wall'. */

          /* Find forward diagonal that maximizes X + Y. */
          fxybest = -1;
          for (d = fmax; d >= fmin; d -= 2)
            {
              lin x = MIN (fd[d], xlim);
              lin y = x - d;
              if (ylim < y)
                x = ylim + d, y = ylim;
              if (fxybest < x + y)
                {
                  fxybest = x + y;
                  fxbest = x;
                }
            }

          /* Find backward diagonal that minimizes X + Y. */
          bxybest = LIN_MAX;
          for (d = bmax; d >= bmin; d -= 2)
            {
              lin x = MAX (xoff, bd[d]);
              lin y = x - d;
              if (y < yoff)
                x = yoff + d, y = yoff;
              if (x + y < bxybest)
                {
                  bxybest = x + y;
                  bxbest = x;
                }
            }

          /* Use the better of the two diagonals. */
          if ((xlim + ylim) - bxybest < fxybest - (xoff + yoff))
            {
              part->xmid = fxbest;
              part->ymid = fxybest - fxbest;
              part->lo_minimal = true;
              part->hi_minimal = false;
            }
          else
            {
              part->xmid = bxbest;
              part->ymid = bxybest - bxbest;
              part->lo_minimal = false;
              part->hi_minimal = true;
            }
          return;
        }
    }
}

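/* Worked example for diag (the line values are hypothetical): with

       xv = { A, B, C }   (xoff = 0, xlim = 3)
       yv = { B, C, D }   (yoff = 0, ylim = 3)

   the shortest edit script is "delete A, insert D", so the two searches
   meet after one edit step each (c == 1).  The forward pass on diagonal
   d = 1 steps to x = 1 and then snakes along the matching B, C to
   (3, 2); the backward pass on the same diagonal slides back from
   (3, 2) to (1, 0), finds x <= fd[1], and diag returns the midpoint
   (xmid, ymid) = (1, 0) with both halves marked minimal.  */
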
/* Compare in detail contiguous subsequences of the two files
   which are known, as a whole, to match each other.

   The results are recorded in the vectors files[N].changed, by
   storing 1 in the element for each line that is an insertion or deletion.

   The subsequence of file 0 is [XOFF, XLIM) and likewise for file 1.

   Note that XLIM, YLIM are exclusive bounds.
   All line numbers are origin-0 and discarded lines are not counted.

   If FIND_MINIMAL, find a minimal difference no matter how
   expensive it is. */

static void
compareseq (lin xoff, lin xlim, lin yoff, lin ylim, bool find_minimal)
{
  lin const *xv = xvec;  /* Help the compiler. */
  lin const *yv = yvec;

  /* Slide down the bottom initial diagonal. */
  while (xoff < xlim && yoff < ylim && xv[xoff] == yv[yoff])
    ++xoff, ++yoff;
  /* Slide up the top initial diagonal. */
  while (xlim > xoff && ylim > yoff && xv[xlim - 1] == yv[ylim - 1])
    --xlim, --ylim;

  /* Handle simple cases. */
  if (xoff == xlim)
    while (yoff < ylim)
      files[1].changed[files[1].realindexes[yoff++]] = 1;
  else if (yoff == ylim)
    while (xoff < xlim)
      files[0].changed[files[0].realindexes[xoff++]] = 1;
  else
    {
      struct partition part;

      /* Find a point of correspondence in the middle of the files. */
      diag (xoff, xlim, yoff, ylim, find_minimal, &part);

      /* Use the partitions to split this problem into subproblems. */
      compareseq (xoff, part.xmid, yoff, part.ymid, part.lo_minimal);
      compareseq (part.xmid, xlim, part.ymid, ylim, part.hi_minimal);
    }
}

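/* Continuing the hypothetical example above: with no discarded lines
   (so realindexes[i] == i), comparing file 0 = { A, B, C } against
   file 1 = { B, C, D } leaves

       files[0].changed = { 1, 0, 0 }   (A is a deletion)
       files[1].changed = { 0, 0, 1 }   (D is an insertion)

   after the recursion finishes; all other elements remain 0.  */
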
/* Discard lines from one file that have no matches in the other file.

   A line which is discarded will not be considered by the actual
   comparison algorithm; it will be as if that line were not in the file.
   The file's `realindexes' table maps virtual line numbers
   (which don't count the discarded lines) into real line numbers;
   this is how the actual comparison algorithm produces results
   that are comprehensible when the discarded lines are counted.

   When we discard a line, we also mark it as a deletion or insertion
   so that it will be printed in the output. */

static void
discard_confusing_lines (struct file_data filevec[])
{
  int f;
  lin i;
  char *discarded[2];
  lin *equiv_count[2];
  lin *p;

  /* Allocate our results. */
  p = xmalloc ((filevec[0].buffered_lines + filevec[1].buffered_lines)
               * (2 * sizeof *p));
  for (f = 0; f < 2; f++)
    {
      filevec[f].undiscarded = p;  p += filevec[f].buffered_lines;
      filevec[f].realindexes = p;  p += filevec[f].buffered_lines;
    }

  /* Set up equiv_count[F][I] as the number of lines in file F
     that fall in equivalence class I. */

  p = zalloc (filevec[0].equiv_max * (2 * sizeof *p));
  equiv_count[0] = p;
  equiv_count[1] = p + filevec[0].equiv_max;

  for (i = 0; i < filevec[0].buffered_lines; ++i)
    ++equiv_count[0][filevec[0].equivs[i]];
  for (i = 0; i < filevec[1].buffered_lines; ++i)
    ++equiv_count[1][filevec[1].equivs[i]];

  /* Set up tables of which lines are going to be discarded. */

  discarded[0] = zalloc (filevec[0].buffered_lines
                         + filevec[1].buffered_lines);
  discarded[1] = discarded[0] + filevec[0].buffered_lines;

  /* Mark to be discarded each line that matches no line of the other file.
     If a line matches many lines, mark it as provisionally discardable. */

  for (f = 0; f < 2; f++)
    {
      size_t end = filevec[f].buffered_lines;
      char *discards = discarded[f];
      lin *counts = equiv_count[1 - f];
      lin *equivs = filevec[f].equivs;
      size_t many = 5;
      size_t tem = end / 64;

      /* Multiply MANY by approximate square root of number of lines.
         That is the threshold for provisionally discardable lines. */
      while ((tem = tem >> 2) > 0)
        many *= 2;

      for (i = 0; i < end; i++)
        {
          lin nmatch;
          if (equivs[i] == 0)
            continue;
          nmatch = counts[equivs[i]];
          if (nmatch == 0)
            discards[i] = 1;
          else if (nmatch > many)
            discards[i] = 2;
        }
    }

  /* Don't really discard the provisional lines except when they occur
     in a run of discardables, with nonprovisionals at the beginning
     and end. */

  for (f = 0; f < 2; f++)
    {
      lin end = filevec[f].buffered_lines;
      register char *discards = discarded[f];

      for (i = 0; i < end; i++)
        {
          /* Cancel provisional discards not in middle of run of discards. */
          if (discards[i] == 2)
            discards[i] = 0;
          else if (discards[i] != 0)
            {
              /* We have found a nonprovisional discard. */
              register lin j;
              lin length;
              lin provisional = 0;

              /* Find end of this run of discardable lines.
                 Count how many are provisionally discardable. */
              for (j = i; j < end; j++)
                {
                  if (discards[j] == 0)
                    break;
                  if (discards[j] == 2)
                    ++provisional;
                }

              /* Cancel provisional discards at end, and shrink the run. */
              while (j > i && discards[j - 1] == 2)
                discards[--j] = 0, --provisional;

              /* Now we have the length of a run of discardable lines
                 whose first and last are not provisional. */
              length = j - i;

              /* If more than 1/4 of the lines in the run are provisional,
                 cancel discarding of all provisional lines in the run. */
              if (provisional * 4 > length)
                {
                  while (j > i)
                    if (discards[--j] == 2)
                      discards[j] = 0;
                }
              else
                {
                  register lin consec;
                  lin minimum = 1;
                  lin tem = length >> 2;

                  /* MINIMUM is approximate square root of LENGTH/4.
                     A subrun of two or more provisionals can stand
                     when LENGTH is at least 16.
                     A subrun of 4 or more can stand when LENGTH >= 64. */
                  while (0 < (tem >>= 2))
                    minimum <<= 1;
                  minimum++;

                  /* Cancel any subrun of MINIMUM or more provisionals
                     within the larger run. */
                  for (j = 0, consec = 0; j < length; j++)
                    if (discards[i + j] != 2)
                      consec = 0;
                    else if (minimum == ++consec)
                      /* Back up to start of subrun, to cancel it all. */
                      j -= consec;
                    else if (minimum < consec)
                      discards[i + j] = 0;

                  /* Scan from beginning of run
                     until we find 3 or more nonprovisionals in a row
                     or until the first nonprovisional at least 8 lines in.
                     Until that point, cancel any provisionals. */
                  for (j = 0, consec = 0; j < length; j++)
                    {
                      if (j >= 8 && discards[i + j] == 1)
                        break;
                      if (discards[i + j] == 2)
                        consec = 0, discards[i + j] = 0;
                      else if (discards[i + j] == 0)
                        consec = 0;
                      else
                        consec++;
                      if (consec == 3)
                        break;
                    }

                  /* I advances to the last line of the run. */
                  i += length - 1;

                  /* Same thing, from end. */
                  for (j = 0, consec = 0; j < length; j++)
                    {
                      if (j >= 8 && discards[i - j] == 1)
                        break;
                      if (discards[i - j] == 2)
                        consec = 0, discards[i - j] = 0;
                      else if (discards[i - j] == 0)
                        consec = 0;
                      else
                        consec++;
                      if (consec == 3)
                        break;
                    }
                }
            }
        }
    }

  /* Actually discard the lines. */
  for (f = 0; f < 2; f++)
    {
      char *discards = discarded[f];
      lin end = filevec[f].buffered_lines;
      lin j = 0;
      for (i = 0; i < end; ++i)
        if (minimal || discards[i] == 0)
          {
            filevec[f].undiscarded[j] = filevec[f].equivs[i];
            filevec[f].realindexes[j++] = i;
          }
        else
          filevec[f].changed[i] = 1;
      filevec[f].nondiscarded_lines = j;
    }

  free (discarded[0]);
  free (equiv_count[0]);
}

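/* Worked numbers for the thresholds above (the sizes are made up for
   illustration): for a 10000-line file, TEM starts at 10000 / 64 = 156
   and the shift loop sees 39, 9, 2, 0, doubling MANY three times, so
   MANY = 5 * 8 = 40; a line whose equivalence class matches more than
   40 lines of the other file is only provisionally discardable.
   Likewise, inside a discardable run of LENGTH lines, subruns of up to
   2 provisionals survive once LENGTH reaches 16, and subruns of up to
   4 once LENGTH reaches 64, matching the MINIMUM comment in the code.  */
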
/* Adjust inserts/deletes of identical lines to join changes
   as much as possible.

   We do something when a run of changed lines includes a
   line at one end and has an excluded, identical line at the other.
   We are free to choose which identical line is included.
   `compareseq' usually chooses the one at the beginning,
   but it is usually cleaner to consider the following identical line
   to be the "change". */

static void
shift_boundaries (struct file_data filevec[])
{
  int f;

  for (f = 0; f < 2; f++)
    {
      char *changed = filevec[f].changed;
      char *other_changed = filevec[1 - f].changed;
      lin const *equivs = filevec[f].equivs;
      lin i = 0;
      lin j = 0;
      lin i_end = filevec[f].buffered_lines;

      while (1)
        {
          lin runlength, start, corresponding;

          /* Scan forwards to find beginning of another run of changes.
             Also keep track of the corresponding point in the other file. */

          while (i < i_end && !changed[i])
            {
              while (other_changed[j++])
                continue;
              i++;
            }

          if (i == i_end)
            break;

          start = i;

          /* Find the end of this run of changes. */

          while (changed[++i])
            continue;
          while (other_changed[j])
            j++;

          do
            {
              /* Record the length of this run of changes, so that
                 we can later determine whether the run has grown. */
              runlength = i - start;

              /* Move the changed region back, so long as the
                 previous unchanged line matches the last changed one.
                 This merges with previous changed regions. */

              while (start && equivs[start - 1] == equivs[i - 1])
                {
                  changed[--start] = 1;
                  changed[--i] = 0;
                  while (changed[start - 1])
                    start--;
                  while (other_changed[--j])
                    continue;
                }

              /* Set CORRESPONDING to the end of the changed run, at the last
                 point where it corresponds to a changed run in the other file.
                 CORRESPONDING == I_END means no such point has been found. */
              corresponding = other_changed[j - 1] ? i : i_end;

              /* Move the changed region forward, so long as the
                 first changed line matches the following unchanged one.
                 This merges with following changed regions.
                 Do this second, so that if there are no merges,
                 the changed region is moved forward as far as possible. */

              while (i != i_end && equivs[start] == equivs[i])
                {
                  changed[start++] = 0;
                  changed[i++] = 1;
                  while (changed[i])
                    i++;
                  while (other_changed[++j])
                    corresponding = i;
                }
            }
          while (runlength != i - start);

          /* If possible, move the fully-merged run of changes
             back to a corresponding run in the other file. */

          while (corresponding < i)
            {
              changed[--start] = 1;
              changed[--i] = 0;
              while (other_changed[--j])
                continue;
            }
        }
    }
}

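/* Illustration (hypothetical lines): if file 0 is { A, B, B, C } and
   file 1 is { A, B, C }, either B can be reported as deleted.  After
   this pass file 0's changed vector is { 0, 0, 1, 0 }: the deletion is
   attached to the second B, i.e. the run is pushed forward as far as
   it will go, which also lets it merge with any change that
   immediately follows.  */
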
/* Cons an additional entry onto the front of an edit script OLD.
   LINE0 and LINE1 are the first affected lines in the two files (origin 0).
   DELETED is the number of lines deleted here from file 0.
   INSERTED is the number of lines inserted here in file 1.

   If DELETED is 0 then LINE0 is the number of the line before
   which the insertion was done; vice versa for INSERTED and LINE1. */

static struct change *
add_change (lin line0, lin line1, lin deleted, lin inserted,
            struct change *old)
{
  struct change *new = xmalloc (sizeof *new);

  new->line0 = line0;
  new->line1 = line1;
  new->inserted = inserted;
  new->deleted = deleted;
  new->link = old;
  return new;
}

/* Scan the tables of which lines are inserted and deleted,
   producing an edit script in reverse order. */

static struct change *
build_reverse_script (struct file_data const filevec[])
{
  struct change *script = 0;
  char *changed0 = filevec[0].changed;
  char *changed1 = filevec[1].changed;
  lin len0 = filevec[0].buffered_lines;
  lin len1 = filevec[1].buffered_lines;

  /* Note that changedN[len0] does exist, and is 0. */

  lin i0 = 0, i1 = 0;

  while (i0 < len0 || i1 < len1)
    {
      if (changed0[i0] | changed1[i1])
        {
          lin line0 = i0, line1 = i1;

          /* Find # lines changed here in each file. */
          while (changed0[i0]) ++i0;
          while (changed1[i1]) ++i1;

          /* Record this change. */
          script = add_change (line0, line1, i0 - line0, i1 - line1, script);
        }

      /* We have reached lines in the two files that match each other. */
      i0++, i1++;
    }

  return script;
}

/* Scan the tables of which lines are inserted and deleted,
   producing an edit script in forward order. */

static struct change *
build_script (struct file_data const filevec[])
{
  struct change *script = 0;
  char *changed0 = filevec[0].changed;
  char *changed1 = filevec[1].changed;
  lin i0 = filevec[0].buffered_lines, i1 = filevec[1].buffered_lines;

  /* Note that changedN[-1] does exist, and is 0. */

  while (i0 >= 0 || i1 >= 0)
    {
      if (changed0[i0 - 1] | changed1[i1 - 1])
        {
          lin line0 = i0, line1 = i1;

          /* Find # lines changed here in each file. */
          while (changed0[i0 - 1]) --i0;
          while (changed1[i1 - 1]) --i1;

          /* Record this change. */
          script = add_change (i0, i1, line0 - i0, line1 - i1, script);
        }

      /* We have reached lines in the two files that match each other. */
      i0--, i1--;
    }

  return script;
}

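/* Illustration (hypothetical data): for file 0 = { A, B, C } versus
   file 1 = { B, C, D } as above, build_script returns a two-element
   chain in forward order:

       { line0 = 0, line1 = 0, deleted = 1, inserted = 0 }   delete A
    -> { line0 = 3, line1 = 2, deleted = 0, inserted = 1 }   append D
    -> NULL

   build_reverse_script yields the same two changes linked in the
   opposite order, which is the order ed-style output wants.  */
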
/* If CHANGES, briefly report that two files differed.
   Return 2 if trouble, CHANGES otherwise. */
static int
briefly_report (int changes, struct file_data const filevec[])
{
  if (changes)
    {
      char const *label0 = file_label[0] ? file_label[0] : filevec[0].name;
      char const *label1 = file_label[1] ? file_label[1] : filevec[1].name;
      message ("Files %s and %s differ\n", label0, label1);
      if (! brief)
        changes = 2;
    }

  return changes;
}

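/* Overview of diff_2_files below, for orientation: read_files decides
   whether the inputs have to be treated as binary; in that case the
   files are simply compared buffer by buffer.  Otherwise the text path
   runs discard_confusing_lines, then compareseq over the undiscarded
   lines, then shift_boundaries, and finally build_script (or
   build_reverse_script for ed output) to produce the edit script
   handed to the output routines.  */
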
/* Report the differences of two files. */
int
diff_2_files (struct comparison *cmp)
{
  lin diags;
  int f;
  struct change *e, *p;
  struct change *script;
  int changes;

  /* If we have detected that either file is binary,
     compare the two files as binary.  This can happen
     only when the first chunk is read.
     Also, --brief without any --ignore-* options means
     we can speed things up by treating the files as binary. */

  if (read_files (cmp->file, files_can_be_treated_as_binary))
    {
      /* Files with different lengths must be different. */
      if (cmp->file[0].stat.st_size != cmp->file[1].stat.st_size
          && (cmp->file[0].desc < 0 || S_ISREG (cmp->file[0].stat.st_mode))
          && (cmp->file[1].desc < 0 || S_ISREG (cmp->file[1].stat.st_mode)))
        changes = 1;

      /* Standard input equals itself. */
      else if (cmp->file[0].desc == cmp->file[1].desc)
        changes = 0;

      else
        /* Scan both files, a buffer at a time, looking for a difference. */
        {
          /* Allocate same-sized buffers for both files. */
          size_t lcm_max = PTRDIFF_MAX - 1;
          size_t buffer_size =
            buffer_lcm (sizeof (word),
                        buffer_lcm (STAT_BLOCKSIZE (cmp->file[0].stat),
                                    STAT_BLOCKSIZE (cmp->file[1].stat),
                                    lcm_max),
                        lcm_max);
          for (f = 0; f < 2; f++)
            cmp->file[f].buffer = xrealloc (cmp->file[f].buffer, buffer_size);

          for (;; cmp->file[0].buffered = cmp->file[1].buffered = 0)
            {
              /* Read a buffer's worth from both files. */
              for (f = 0; f < 2; f++)
                if (0 <= cmp->file[f].desc)
                  file_block_read (&cmp->file[f],
                                   buffer_size - cmp->file[f].buffered);

              /* If the buffers differ, the files differ. */
              if (cmp->file[0].buffered != cmp->file[1].buffered
                  || memcmp (cmp->file[0].buffer,
                             cmp->file[1].buffer,
                             cmp->file[0].buffered))
                {
                  changes = 1;
                  break;
                }

              /* If we reach end of file, the files are the same. */
              if (cmp->file[0].buffered != buffer_size)
                {
                  changes = 0;
                  break;
                }
            }
        }

      changes = briefly_report (changes, cmp->file);
    }
  else
    {
      /* Allocate vectors for the results of comparison:
         a flag for each line of each file, saying whether that line
         is an insertion or deletion.
         Allocate an extra element, always 0, at each end of each vector. */

      size_t s = cmp->file[0].buffered_lines + cmp->file[1].buffered_lines + 4;
      char *flag_space = zalloc (s);
      cmp->file[0].changed = flag_space + 1;
      cmp->file[1].changed = flag_space + cmp->file[0].buffered_lines + 3;

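      /* Note on the layout above (sizes are illustrative): with
         buffered_lines of 3 and 3, S is 10 zeroed bytes and the two
         CHANGED vectors are carved out so that changed[-1] and
         changed[buffered_lines] both exist and are 0 for each file --
         exactly the guard elements that build_script and
         build_reverse_script rely on.  */
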
      /* Some lines are obviously insertions or deletions
         because they don't match anything.  Detect them now, and
         avoid even thinking about them in the main comparison algorithm. */

      discard_confusing_lines (cmp->file);

      /* Now do the main comparison algorithm, considering just the
         undiscarded lines. */

      xvec = cmp->file[0].undiscarded;
      yvec = cmp->file[1].undiscarded;
      diags = (cmp->file[0].nondiscarded_lines
               + cmp->file[1].nondiscarded_lines + 3);
      fdiag = xmalloc (diags * (2 * sizeof *fdiag));
      bdiag = fdiag + diags;
      fdiag += cmp->file[1].nondiscarded_lines + 1;
      bdiag += cmp->file[1].nondiscarded_lines + 1;

      /* Set TOO_EXPENSIVE to be approximate square root of input size,
         bounded below by 256. */
      too_expensive = 1;
      for (; diags != 0; diags >>= 2)
        too_expensive <<= 1;
      too_expensive = MAX (256, too_expensive);

      files[0] = cmp->file[0];
      files[1] = cmp->file[1];

      compareseq (0, cmp->file[0].nondiscarded_lines,
                  0, cmp->file[1].nondiscarded_lines, minimal);

      free (fdiag - (cmp->file[1].nondiscarded_lines + 1));

      /* Modify the results slightly to make them prettier
         in cases where that can validly be done. */

      shift_boundaries (cmp->file);

      /* Get the results of comparison in the form of a chain
         of `struct change's -- an edit script. */

      if (output_style == OUTPUT_ED)
        script = build_reverse_script (cmp->file);
      else
        script = build_script (cmp->file);

      /* Set CHANGES if we had any diffs.
         If some changes are ignored, we must scan the script to decide. */
      if (ignore_blank_lines || ignore_regexp.fastmap)
        {
          struct change *next = script;
          changes = 0;

          while (next && changes == 0)
            {
              struct change *this, *end;
              lin first0, last0, first1, last1;

              /* Find a set of changes that belong together. */
              this = next;
              end = find_change (next);

              /* Disconnect them from the rest of the changes, making them
                 a hunk, and remember the rest for next iteration. */
              next = end->link;
              end->link = 0;

              /* Determine whether this hunk is really a difference. */
              if (analyze_hunk (this, &first0, &last0, &first1, &last1))
                changes = 1;

              /* Reconnect the script so it will all be freed properly. */
              end->link = next;
            }
        }
      else
        changes = (script != 0);

      if (brief)
        changes = briefly_report (changes, cmp->file);
      else
        {
          if (changes | !no_diff_means_no_output)
            {
              /* Record info for starting up output,
                 to be used if and when we have some output to print. */
              setup_output (file_label[0] ? file_label[0] : cmp->file[0].name,
                            file_label[1] ? file_label[1] : cmp->file[1].name,
                            cmp->parent != 0);

              switch (output_style)
                {
                case OUTPUT_CONTEXT:
                  print_context_script (script, false);
                  break;

                case OUTPUT_UNIFIED:
                  print_context_script (script, true);
                  break;

                case OUTPUT_ED:
                  print_ed_script (script);
                  break;

                case OUTPUT_FORWARD_ED:
                  pr_forward_ed_script (script);
                  break;

                case OUTPUT_RCS:
                  print_rcs_script (script);
                  break;

                case OUTPUT_NORMAL:
                  print_normal_script (script);
                  break;

                case OUTPUT_IFDEF:
                  print_ifdef_script (script);
                  break;

                case OUTPUT_SDIFF:
                  print_sdiff_script (script);
                  break;

                default:
                  abort ();
                }

              finish_output ();
            }
        }

      free (cmp->file[0].undiscarded);

      free (flag_space);

      for (f = 0; f < 2; f++)
        {
          free (cmp->file[f].equivs);
          free (cmp->file[f].linbuf + cmp->file[f].linbuf_base);
        }

      for (e = script; e; e = p)
        {
          p = e->link;
          free (e);
        }

      if (! ROBUST_OUTPUT_STYLE (output_style))
        for (f = 0; f < 2; ++f)
          if (cmp->file[f].missing_newline)
            {
              error (0, 0, "%s: %s\n",
                     file_label[f] ? file_label[f] : cmp->file[f].name,
                     _("No newline at end of file"));
              changes = 2;
            }
    }

  if (cmp->file[0].buffer != cmp->file[1].buffer)
    free (cmp->file[0].buffer);
  free (cmp->file[1].buffer);

  return changes;
}