Inkscape
Vector Graphics Editor
Loading...
Searching...
No Matches
spiro.cpp
Go to the documentation of this file.
1// SPDX-License-Identifier: GPL-2.0-or-later
/*
5 * Authors: see git history
6 * Raph Levien
7 * Johan Engelen
8 *
9 * Copyright (C) 2018 Authors
10 * Released under GNU GPL v2+, read the file 'COPYING' for more information.
11 */
12
13#include "spiro.h"
14
15#include <cmath>
16#include <cstdlib>
17#include <cstring>
18
19#define SPIRO_SHOW_INFINITE_COORDINATE_CALLS
20
21namespace Spiro {
22
23Geom::Path spiro_run(const spiro_cp *src, int src_len)
24{
25 spiro_seg *s = Spiro::run_spiro(src, src_len);
26 Geom::Path path;
27 Spiro::ConverterPath bc(path);
28 Spiro::spiro_to_otherpath(s, src_len, bc);
29 free(s);
30 return path;
31}
32
33/************************************
34 * Spiro math
35 */
36
/* One solved segment of a spiro spline, from this knot to the next. */
struct spiro_seg_s {
    double x;        /* knot x coordinate */
    double y;        /* knot y coordinate */
    char ty;         /* knot type code ('{', '}', 'v', 'o', 'c', '[', ']') */
    double bend_th;  /* bend angle at this knot (set in setup_path) */
    double ks[4];    /* polynomial spiral curvature coefficients k0..k3 */
    double seg_ch;   /* chord length to the next knot */
    double seg_th;   /* chord angle to the next knot */
    double l;        /* NOTE(review): never referenced in this file -- confirm before removing */
};
47
/* One row of the 11-wide band-diagonal Newton system (see bandec11/banbks11). */
struct bandmat {
    double a[11]; /* band-diagonal matrix */
    double al[5]; /* lower part of band-diagonal decomposition */
};
52
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif

/* Number of subdivisions used by integrate_spiro(). The UNIT_TEST driver
   mutates this to benchmark different subdivision counts.
   NOTE(review): mutable file-scope global with external linkage named 'n'
   -- confirm no clash with other translation units. */
int n = 4;

/* Order of the polynomial approximation used by integrate_spiro(). */
#ifndef ORDER
#define ORDER 12
#endif
62
/* Integrate polynomial spiral curve over range -.5 .. .5. */
/*
 * The spiral has angle th(s) = ks[0]*s + ks[1]*s^2/2 + ks[2]*s^3/6 + ks[3]*s^4/24.
 * The integral of (cos th, sin th) over [-.5, .5] is accumulated into xy[],
 * using n subdivisions (file-scope global) and a truncated polynomial
 * expansion of order ORDER for each slice. Each "#if ORDER == k" block is a
 * machine-generated expansion; the t<i>_<j> temporaries are terms of the
 * power series in the local curvature coefficients km0..km3.
 */
static void
integrate_spiro(const double ks[4], double xy[2])
{
#if 0
    int n = 1024;
#endif
    double th1 = ks[0];
    double th2 = .5 * ks[1];
    double th3 = (1./6) * ks[2];
    double th4 = (1./24) * ks[3];
    double x, y;
    double ds = 1. / n;
    double ds2 = ds * ds;
    double ds3 = ds2 * ds;
    double k0 = ks[0] * ds;
    double k1 = ks[1] * ds;
    double k2 = ks[2] * ds;
    double k3 = ks[3] * ds;
    int i;
    double s = .5 * ds - .5;

    x = 0;
    y = 0;

    for (i = 0; i < n; i++) {

#if ORDER > 2
        double u, v;
        double km0, km1, km2, km3;

        /* Curvature coefficients of this slice, re-centered at its midpoint. */
        if (n == 1) {
            km0 = k0;
            km1 = k1 * ds;
            km2 = k2 * ds2;
        } else {
            km0 = (((1./6) * k3 * s + .5 * k2) * s + k1) * s + k0;
            km1 = ((.5 * k3 * s + k2) * s + k1) * ds;
            km2 = (k3 * s + k2) * ds2;
        }
        km3 = k3 * ds3;
#endif

        {

#if ORDER == 4
            double km0_2 = km0 * km0;
            u = 24 - km0_2;
            v = km1;
#endif

#if ORDER == 6
            double km0_2 = km0 * km0;
            double km0_4 = km0_2 * km0_2;
            u = 24 - km0_2 + (km0_4 - 4 * km0 * km2 - 3 * km1 * km1) * (1./80);
            v = km1 + (km3 - 6 * km0_2 * km1) * (1./80);
#endif

#if ORDER == 8
            double t1_1 = km0;
            double t1_2 = .5 * km1;
            double t1_3 = (1./6) * km2;
            double t1_4 = (1./24) * km3;
            double t2_2 = t1_1 * t1_1;
            double t2_3 = 2 * (t1_1 * t1_2);
            double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
            double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
            double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
            double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
            double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
            double t4_4 = t2_2 * t2_2;
            double t4_5 = 2 * (t2_2 * t2_3);
            double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
            double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
            double t6_6 = t4_4 * t2_2;
            u = 1;
            v = 0;
            v += (1./12) * t1_2 + (1./80) * t1_4;
            u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6;
            v -= (1./480) * t3_4 + (1./2688) * t3_6;
            u += (1./1920) * t4_4 + (1./10752) * t4_6;
            v += (1./53760) * t5_6;
            u -= (1./322560) * t6_6;
#endif

#if ORDER == 10
            double t1_1 = km0;
            double t1_2 = .5 * km1;
            double t1_3 = (1./6) * km2;
            double t1_4 = (1./24) * km3;
            double t2_2 = t1_1 * t1_1;
            double t2_3 = 2 * (t1_1 * t1_2);
            double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
            double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
            double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
            double t2_7 = 2 * (t1_3 * t1_4);
            double t2_8 = t1_4 * t1_4;
            double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
            double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
            double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
            double t4_4 = t2_2 * t2_2;
            double t4_5 = 2 * (t2_2 * t2_3);
            double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
            double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
            double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
            double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
            double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
            double t6_6 = t4_4 * t2_2;
            double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
            double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
            double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
            double t8_8 = t6_6 * t2_2;
            u = 1;
            v = 0;
            v += (1./12) * t1_2 + (1./80) * t1_4;
            u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6 + (1./4608) * t2_8;
            v -= (1./480) * t3_4 + (1./2688) * t3_6 + (1./13824) * t3_8;
            u += (1./1920) * t4_4 + (1./10752) * t4_6 + (1./55296) * t4_8;
            v += (1./53760) * t5_6 + (1./276480) * t5_8;
            u -= (1./322560) * t6_6 + (1./1.65888e+06) * t6_8;
            v -= (1./1.16122e+07) * t7_8;
            u += (1./9.28973e+07) * t8_8;
#endif

#if ORDER == 12
            double t1_1 = km0;
            double t1_2 = .5 * km1;
            double t1_3 = (1./6) * km2;
            double t1_4 = (1./24) * km3;
            double t2_2 = t1_1 * t1_1;
            double t2_3 = 2 * (t1_1 * t1_2);
            double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
            double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
            double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
            double t2_7 = 2 * (t1_3 * t1_4);
            double t2_8 = t1_4 * t1_4;
            double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
            double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
            double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
            double t3_10 = t2_6 * t1_4 + t2_7 * t1_3 + t2_8 * t1_2;
            double t4_4 = t2_2 * t2_2;
            double t4_5 = 2 * (t2_2 * t2_3);
            double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
            double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
            double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
            double t4_9 = 2 * (t2_2 * t2_7 + t2_3 * t2_6 + t2_4 * t2_5);
            double t4_10 = 2 * (t2_2 * t2_8 + t2_3 * t2_7 + t2_4 * t2_6) + t2_5 * t2_5;
            double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
            double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
            double t5_10 = t4_6 * t1_4 + t4_7 * t1_3 + t4_8 * t1_2 + t4_9 * t1_1;
            double t6_6 = t4_4 * t2_2;
            double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
            double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
            double t6_9 = t4_4 * t2_5 + t4_5 * t2_4 + t4_6 * t2_3 + t4_7 * t2_2;
            double t6_10 = t4_4 * t2_6 + t4_5 * t2_5 + t4_6 * t2_4 + t4_7 * t2_3 + t4_8 * t2_2;
            double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
            double t7_10 = t6_6 * t1_4 + t6_7 * t1_3 + t6_8 * t1_2 + t6_9 * t1_1;
            double t8_8 = t6_6 * t2_2;
            double t8_9 = t6_6 * t2_3 + t6_7 * t2_2;
            double t8_10 = t6_6 * t2_4 + t6_7 * t2_3 + t6_8 * t2_2;
            double t9_10 = t8_8 * t1_2 + t8_9 * t1_1;
            double t10_10 = t8_8 * t2_2;
            u = 1;
            v = 0;
            v += (1./12) * t1_2 + (1./80) * t1_4;
            u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6 + (1./4608) * t2_8;
            v -= (1./480) * t3_4 + (1./2688) * t3_6 + (1./13824) * t3_8 + (1./67584) * t3_10;
            u += (1./1920) * t4_4 + (1./10752) * t4_6 + (1./55296) * t4_8 + (1./270336) * t4_10;
            v += (1./53760) * t5_6 + (1./276480) * t5_8 + (1./1.35168e+06) * t5_10;
            u -= (1./322560) * t6_6 + (1./1.65888e+06) * t6_8 + (1./8.11008e+06) * t6_10;
            v -= (1./1.16122e+07) * t7_8 + (1./5.67706e+07) * t7_10;
            u += (1./9.28973e+07) * t8_8 + (1./4.54164e+08) * t8_10;
            v += (1./4.08748e+09) * t9_10;
            u -= (1./4.08748e+10) * t10_10;
#endif

#if ORDER == 14
            double t1_1 = km0;
            double t1_2 = .5 * km1;
            double t1_3 = (1./6) * km2;
            double t1_4 = (1./24) * km3;
            double t2_2 = t1_1 * t1_1;
            double t2_3 = 2 * (t1_1 * t1_2);
            double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
            double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
            double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
            double t2_7 = 2 * (t1_3 * t1_4);
            double t2_8 = t1_4 * t1_4;
            double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
            double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
            double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
            double t3_10 = t2_6 * t1_4 + t2_7 * t1_3 + t2_8 * t1_2;
            double t3_12 = t2_8 * t1_4;
            double t4_4 = t2_2 * t2_2;
            double t4_5 = 2 * (t2_2 * t2_3);
            double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
            double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
            double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
            double t4_9 = 2 * (t2_2 * t2_7 + t2_3 * t2_6 + t2_4 * t2_5);
            double t4_10 = 2 * (t2_2 * t2_8 + t2_3 * t2_7 + t2_4 * t2_6) + t2_5 * t2_5;
            double t4_11 = 2 * (t2_3 * t2_8 + t2_4 * t2_7 + t2_5 * t2_6);
            double t4_12 = 2 * (t2_4 * t2_8 + t2_5 * t2_7) + t2_6 * t2_6;
            double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
            double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
            double t5_10 = t4_6 * t1_4 + t4_7 * t1_3 + t4_8 * t1_2 + t4_9 * t1_1;
            double t5_12 = t4_8 * t1_4 + t4_9 * t1_3 + t4_10 * t1_2 + t4_11 * t1_1;
            double t6_6 = t4_4 * t2_2;
            double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
            double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
            double t6_9 = t4_4 * t2_5 + t4_5 * t2_4 + t4_6 * t2_3 + t4_7 * t2_2;
            double t6_10 = t4_4 * t2_6 + t4_5 * t2_5 + t4_6 * t2_4 + t4_7 * t2_3 + t4_8 * t2_2;
            double t6_11 = t4_4 * t2_7 + t4_5 * t2_6 + t4_6 * t2_5 + t4_7 * t2_4 + t4_8 * t2_3 + t4_9 * t2_2;
            double t6_12 = t4_4 * t2_8 + t4_5 * t2_7 + t4_6 * t2_6 + t4_7 * t2_5 + t4_8 * t2_4 + t4_9 * t2_3 + t4_10 * t2_2;
            double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
            double t7_10 = t6_6 * t1_4 + t6_7 * t1_3 + t6_8 * t1_2 + t6_9 * t1_1;
            double t7_12 = t6_8 * t1_4 + t6_9 * t1_3 + t6_10 * t1_2 + t6_11 * t1_1;
            double t8_8 = t6_6 * t2_2;
            double t8_9 = t6_6 * t2_3 + t6_7 * t2_2;
            double t8_10 = t6_6 * t2_4 + t6_7 * t2_3 + t6_8 * t2_2;
            double t8_11 = t6_6 * t2_5 + t6_7 * t2_4 + t6_8 * t2_3 + t6_9 * t2_2;
            double t8_12 = t6_6 * t2_6 + t6_7 * t2_5 + t6_8 * t2_4 + t6_9 * t2_3 + t6_10 * t2_2;
            double t9_10 = t8_8 * t1_2 + t8_9 * t1_1;
            double t9_12 = t8_8 * t1_4 + t8_9 * t1_3 + t8_10 * t1_2 + t8_11 * t1_1;
            double t10_10 = t8_8 * t2_2;
            double t10_11 = t8_8 * t2_3 + t8_9 * t2_2;
            double t10_12 = t8_8 * t2_4 + t8_9 * t2_3 + t8_10 * t2_2;
            double t11_12 = t10_10 * t1_2 + t10_11 * t1_1;
            double t12_12 = t10_10 * t2_2;
            u = 1;
            v = 0;
            v += (1./12) * t1_2 + (1./80) * t1_4;
            u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6 + (1./4608) * t2_8;
            v -= (1./480) * t3_4 + (1./2688) * t3_6 + (1./13824) * t3_8 + (1./67584) * t3_10 + (1./319488) * t3_12;
            u += (1./1920) * t4_4 + (1./10752) * t4_6 + (1./55296) * t4_8 + (1./270336) * t4_10 + (1./1.27795e+06) * t4_12;
            v += (1./53760) * t5_6 + (1./276480) * t5_8 + (1./1.35168e+06) * t5_10 + (1./6.38976e+06) * t5_12;
            u -= (1./322560) * t6_6 + (1./1.65888e+06) * t6_8 + (1./8.11008e+06) * t6_10 + (1./3.83386e+07) * t6_12;
            v -= (1./1.16122e+07) * t7_8 + (1./5.67706e+07) * t7_10 + (1./2.6837e+08) * t7_12;
            u += (1./9.28973e+07) * t8_8 + (1./4.54164e+08) * t8_10 + (1./2.14696e+09) * t8_12;
            v += (1./4.08748e+09) * t9_10 + (1./1.93226e+10) * t9_12;
            u -= (1./4.08748e+10) * t10_10 + (1./1.93226e+11) * t10_12;
            v -= (1./2.12549e+12) * t11_12;
            u += (1./2.55059e+13) * t12_12;
#endif

#if ORDER == 16
            double t1_1 = km0;
            double t1_2 = .5 * km1;
            double t1_3 = (1./6) * km2;
            double t1_4 = (1./24) * km3;
            double t2_2 = t1_1 * t1_1;
            double t2_3 = 2 * (t1_1 * t1_2);
            double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
            double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
            double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
            double t2_7 = 2 * (t1_3 * t1_4);
            double t2_8 = t1_4 * t1_4;
            double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
            double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
            double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
            double t3_10 = t2_6 * t1_4 + t2_7 * t1_3 + t2_8 * t1_2;
            double t3_12 = t2_8 * t1_4;
            double t4_4 = t2_2 * t2_2;
            double t4_5 = 2 * (t2_2 * t2_3);
            double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
            double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
            double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
            double t4_9 = 2 * (t2_2 * t2_7 + t2_3 * t2_6 + t2_4 * t2_5);
            double t4_10 = 2 * (t2_2 * t2_8 + t2_3 * t2_7 + t2_4 * t2_6) + t2_5 * t2_5;
            double t4_11 = 2 * (t2_3 * t2_8 + t2_4 * t2_7 + t2_5 * t2_6);
            double t4_12 = 2 * (t2_4 * t2_8 + t2_5 * t2_7) + t2_6 * t2_6;
            double t4_13 = 2 * (t2_5 * t2_8 + t2_6 * t2_7);
            double t4_14 = 2 * (t2_6 * t2_8) + t2_7 * t2_7;
            double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
            double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
            double t5_10 = t4_6 * t1_4 + t4_7 * t1_3 + t4_8 * t1_2 + t4_9 * t1_1;
            double t5_12 = t4_8 * t1_4 + t4_9 * t1_3 + t4_10 * t1_2 + t4_11 * t1_1;
            double t5_14 = t4_10 * t1_4 + t4_11 * t1_3 + t4_12 * t1_2 + t4_13 * t1_1;
            double t6_6 = t4_4 * t2_2;
            double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
            double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
            double t6_9 = t4_4 * t2_5 + t4_5 * t2_4 + t4_6 * t2_3 + t4_7 * t2_2;
            double t6_10 = t4_4 * t2_6 + t4_5 * t2_5 + t4_6 * t2_4 + t4_7 * t2_3 + t4_8 * t2_2;
            double t6_11 = t4_4 * t2_7 + t4_5 * t2_6 + t4_6 * t2_5 + t4_7 * t2_4 + t4_8 * t2_3 + t4_9 * t2_2;
            double t6_12 = t4_4 * t2_8 + t4_5 * t2_7 + t4_6 * t2_6 + t4_7 * t2_5 + t4_8 * t2_4 + t4_9 * t2_3 + t4_10 * t2_2;
            double t6_13 = t4_5 * t2_8 + t4_6 * t2_7 + t4_7 * t2_6 + t4_8 * t2_5 + t4_9 * t2_4 + t4_10 * t2_3 + t4_11 * t2_2;
            double t6_14 = t4_6 * t2_8 + t4_7 * t2_7 + t4_8 * t2_6 + t4_9 * t2_5 + t4_10 * t2_4 + t4_11 * t2_3 + t4_12 * t2_2;
            double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
            double t7_10 = t6_6 * t1_4 + t6_7 * t1_3 + t6_8 * t1_2 + t6_9 * t1_1;
            double t7_12 = t6_8 * t1_4 + t6_9 * t1_3 + t6_10 * t1_2 + t6_11 * t1_1;
            double t7_14 = t6_10 * t1_4 + t6_11 * t1_3 + t6_12 * t1_2 + t6_13 * t1_1;
            double t8_8 = t6_6 * t2_2;
            double t8_9 = t6_6 * t2_3 + t6_7 * t2_2;
            double t8_10 = t6_6 * t2_4 + t6_7 * t2_3 + t6_8 * t2_2;
            double t8_11 = t6_6 * t2_5 + t6_7 * t2_4 + t6_8 * t2_3 + t6_9 * t2_2;
            double t8_12 = t6_6 * t2_6 + t6_7 * t2_5 + t6_8 * t2_4 + t6_9 * t2_3 + t6_10 * t2_2;
            double t8_13 = t6_6 * t2_7 + t6_7 * t2_6 + t6_8 * t2_5 + t6_9 * t2_4 + t6_10 * t2_3 + t6_11 * t2_2;
            double t8_14 = t6_6 * t2_8 + t6_7 * t2_7 + t6_8 * t2_6 + t6_9 * t2_5 + t6_10 * t2_4 + t6_11 * t2_3 + t6_12 * t2_2;
            double t9_10 = t8_8 * t1_2 + t8_9 * t1_1;
            double t9_12 = t8_8 * t1_4 + t8_9 * t1_3 + t8_10 * t1_2 + t8_11 * t1_1;
            double t9_14 = t8_10 * t1_4 + t8_11 * t1_3 + t8_12 * t1_2 + t8_13 * t1_1;
            double t10_10 = t8_8 * t2_2;
            double t10_11 = t8_8 * t2_3 + t8_9 * t2_2;
            double t10_12 = t8_8 * t2_4 + t8_9 * t2_3 + t8_10 * t2_2;
            double t10_13 = t8_8 * t2_5 + t8_9 * t2_4 + t8_10 * t2_3 + t8_11 * t2_2;
            double t10_14 = t8_8 * t2_6 + t8_9 * t2_5 + t8_10 * t2_4 + t8_11 * t2_3 + t8_12 * t2_2;
            double t11_12 = t10_10 * t1_2 + t10_11 * t1_1;
            double t11_14 = t10_10 * t1_4 + t10_11 * t1_3 + t10_12 * t1_2 + t10_13 * t1_1;
            double t12_12 = t10_10 * t2_2;
            double t12_13 = t10_10 * t2_3 + t10_11 * t2_2;
            double t12_14 = t10_10 * t2_4 + t10_11 * t2_3 + t10_12 * t2_2;
            double t13_14 = t12_12 * t1_2 + t12_13 * t1_1;
            double t14_14 = t12_12 * t2_2;
            u = 1;
            u -= 1./24 * t2_2 + 1./160 * t2_4 + 1./896 * t2_6 + 1./4608 * t2_8;
            u += 1./1920 * t4_4 + 1./10752 * t4_6 + 1./55296 * t4_8 + 1./270336 * t4_10 + 1./1277952 * t4_12 + 1./5898240 * t4_14;
            u -= 1./322560 * t6_6 + 1./1658880 * t6_8 + 1./8110080 * t6_10 + 1./38338560 * t6_12 + 1./176947200 * t6_14;
            u += 1./92897280 * t8_8 + 1./454164480 * t8_10 + 4.6577500191e-10 * t8_12 + 1.0091791708e-10 * t8_14;
            u -= 2.4464949595e-11 * t10_10 + 5.1752777990e-12 * t10_12 + 1.1213101898e-12 * t10_14;
            u += 3.9206649992e-14 * t12_12 + 8.4947741650e-15 * t12_14;
            u -= 4.6674583324e-17 * t14_14;
            v = 0;
            v += 1./12 * t1_2 + 1./80 * t1_4;
            v -= 1./480 * t3_4 + 1./2688 * t3_6 + 1./13824 * t3_8 + 1./67584 * t3_10 + 1./319488 * t3_12;
            v += 1./53760 * t5_6 + 1./276480 * t5_8 + 1./1351680 * t5_10 + 1./6389760 * t5_12 + 1./29491200 * t5_14;
            v -= 1./11612160 * t7_8 + 1./56770560 * t7_10 + 1./268369920 * t7_12 + 8.0734333664e-10 * t7_14;
            v += 2.4464949595e-10 * t9_10 + 5.1752777990e-11 * t9_12 + 1.1213101898e-11 * t9_14;
            v -= 4.7047979991e-13 * t11_12 + 1.0193728998e-13 * t11_14;
            v += 6.5344416654e-16 * t13_14;
#endif

        }

        if (n == 1) {
#if ORDER == 2
            x = 1;
            y = 0;
#else
            x = u;
            y = v;
#endif
        } else {
            /* Rotate the slice contribution by the angle at its midpoint
               and advance to the next slice. */
            double th = (((th4 * s + th3) * s + th2) * s + th1) * s;
            double cth = cos(th);
            double sth = sin(th);

#if ORDER == 2
            x += cth;
            y += sth;
#else
            x += cth * u - sth * v;
            y += cth * v + sth * u;
#endif
            s += ds;
        }
    }

    /* Orders 4/6 leave a factor of 24 in u; fold it into the final scale. */
#if ORDER == 4 || ORDER == 6
    xy[0] = x * (1./24 * ds);
    xy[1] = y * (1./24 * ds);
#else
    xy[0] = x * ds;
    xy[1] = y * ds;
#endif
}
427
/*
 * Compute endpoint parameters of one polynomial-spiral segment from its
 * coefficients ks[0..3].
 *
 * ends[0][*] receives {th, k0, k1, k2} at the left end and ends[1][*] the
 * same at the right end, with curvatures scaled by powers of
 * l = ch / seg_ch (ratio of the unit-domain chord to the actual chord).
 * Returns l.
 */
static double
compute_ends(const double ks[4], double ends[2][4], double seg_ch)
{
    double xy[2];
    double ch, th;
    double l, l2, l3;
    double th_even, th_odd;
    double k0_even, k0_odd;
    double k1_even, k1_odd;
    double k2_even, k2_odd;

    integrate_spiro(ks, xy);
    ch = hypot(xy[0], xy[1]);
    th = atan2(xy[1], xy[0]);
    l = ch / seg_ch;

    /* Each quantity is split into parts that are even/odd about the
       segment midpoint, then combined as even -/+ odd for the two ends. */
    th_even = .5 * ks[0] + (1./48) * ks[2];
    th_odd = .125 * ks[1] + (1./384) * ks[3] - th;
    ends[0][0] = th_even - th_odd;
    ends[1][0] = th_even + th_odd;
    k0_even = l * (ks[0] + .125 * ks[2]);
    k0_odd = l * (.5 * ks[1] + (1./48) * ks[3]);
    ends[0][1] = k0_even - k0_odd;
    ends[1][1] = k0_even + k0_odd;
    l2 = l * l;
    k1_even = l2 * (ks[1] + .125 * ks[3]);
    k1_odd = l2 * .5 * ks[2];
    ends[0][2] = k1_even - k1_odd;
    ends[1][2] = k1_even + k1_odd;
    l3 = l2 * l;
    k2_even = l3 * ks[2];
    k2_odd = l3 * .5 * ks[3];
    ends[0][3] = k2_even - k2_odd;
    ends[1][3] = k2_even + k2_odd;

    return l;
}
465
466static void
467compute_pderivs(const spiro_seg *s, double ends[2][4], double derivs[4][2][4],
468 int jinc)
469{
470 double recip_d = 2e6;
471 double delta = 1./ recip_d;
472 double try_ks[4];
473 double try_ends[2][4];
474 int i, j, k;
475
476 compute_ends(s->ks, ends, s->seg_ch);
477 for (i = 0; i < jinc; i++) {
478 for (j = 0; j < 4; j++)
479 try_ks[j] = s->ks[j];
480 try_ks[i] += delta;
481 compute_ends(try_ks, try_ends, s->seg_ch);
482 for (k = 0; k < 2; k++)
483 for (j = 0; j < 4; j++)
484 derivs[j][k][i] = recip_d * (try_ends[k][j] - ends[k][j]);
485 }
486}
487
/* Reduce an angle to the principal range (-pi, pi]. */
static double
mod_2pi(double th)
{
    const double two_pi = 2 * M_PI;
    double turns = th / two_pi;
    return two_pi * (turns - floor(turns + 0.5));
}
494
495static spiro_seg *
496setup_path(const spiro_cp *src, int n)
497{
498 int n_seg = src[0].ty == '{' ? n - 1 : n;
499 spiro_seg *r = (spiro_seg *)malloc((n_seg + 1) * sizeof(spiro_seg));
500 int i;
501 int ilast;
502
503 for (i = 0; i < n_seg; i++) {
504 r[i].x = src[i].x;
505 r[i].y = src[i].y;
506 r[i].ty = src[i].ty;
507 r[i].ks[0] = 0.;
508 r[i].ks[1] = 0.;
509 r[i].ks[2] = 0.;
510 r[i].ks[3] = 0.;
511 }
512 r[n_seg].x = src[n_seg % n].x;
513 r[n_seg].y = src[n_seg % n].y;
514 r[n_seg].ty = src[n_seg % n].ty;
515
516 for (i = 0; i < n_seg; i++) {
517 double dx = r[i + 1].x - r[i].x;
518 double dy = r[i + 1].y - r[i].y;
519 r[i].seg_ch = hypot(dx, dy);
520 r[i].seg_th = atan2(dy, dx);
521 }
522
523 ilast = n_seg - 1;
524 for (i = 0; i < n_seg; i++) {
525 if (r[i].ty == '{' || r[i].ty == '}' || r[i].ty == 'v')
526 r[i].bend_th = 0.;
527 else
528 r[i].bend_th = mod_2pi(r[i].seg_th - r[ilast].seg_th);
529 ilast = i;
530 }
531 return r;
532}
533
/*
 * LU-decompose an 11-wide band-diagonal matrix with partial pivoting,
 * in the style of Numerical Recipes' bandec. On exit m holds the packed
 * upper factor in a[] and elimination multipliers in al[]; perm[k]
 * records the row swapped into position k.
 */
static void
bandec11(bandmat *m, int *perm, int n)
{
    int i, j, k;
    int l;

    /* pack top triangle to the left. */
    for (i = 0; i < 5; i++) {
        for (j = 0; j < i + 6; j++)
            m[i].a[j] = m[i].a[j + 5 - i];
        for (; j < 11; j++)
            m[i].a[j] = 0.;
    }
    l = 5;
    for (k = 0; k < n; k++) {
        int pivot = k;
        double pivot_val = m[k].a[0];
        double pivot_scale;

        /* l limits the search/elimination to rows within the bandwidth. */
        l = l < n ? l + 1 : n;

        /* partial pivoting: choose the row with the largest leading element. */
        for (j = k + 1; j < l; j++)
            if (fabs(m[j].a[0]) > fabs(pivot_val)) {
                pivot_val = m[j].a[0];
                pivot = j;
            }

        perm[k] = pivot;
        if (pivot != k) {
            for (j = 0; j < 11; j++) {
                double tmp = m[k].a[j];
                m[k].a[j] = m[pivot].a[j];
                m[pivot].a[j] = tmp;
            }
        }

        /* clamp a near-singular pivot so elimination stays finite. */
        if (fabs(pivot_val) < 1e-12) pivot_val = 1e-12;
        pivot_scale = 1. / pivot_val;
        for (i = k + 1; i < l; i++) {
            double x = m[i].a[0] * pivot_scale;
            m[k].al[i - k - 1] = x;
            for (j = 1; j < 11; j++)
                m[i].a[j - 1] = m[i].a[j] - x * m[k].a[j];
            m[i].a[10] = 0.;
        }
    }
}
581
/*
 * Solve the band system previously factored by bandec11() for the
 * right-hand side v, overwriting v with the solution
 * (Numerical Recipes banbks style).
 */
static void
banbks11(const bandmat *m, const int *perm, double *v, int n)
{
    int i, k, l;

    /* forward substitution */
    l = 5;
    for (k = 0; k < n; k++) {
        /* apply the pivot permutation recorded by bandec11 */
        i = perm[k];
        if (i != k) {
            double tmp = v[k];
            v[k] = v[i];
            v[i] = tmp;
        }
        if (l < n) l++;
        for (i = k + 1; i < l; i++)
            v[i] -= m[k].al[i - k - 1] * v[k];
    }

    /* back substitution */
    l = 1;
    for (i = n - 1; i >= 0; i--) {
        double x = v[i];
        for (k = 1; k < l; k++)
            x -= m[i].a[k] * v[k + i];
        v[i] = x / m[i].a[0];
        if (l < 11) l++;
    }
}
611
/*
 * Number of free curvature coefficients for the segment between knot
 * types ty0 and ty1 (4 = fully smooth, 2 = corner-to-corner,
 * 1 = one-sided constraint, 0 = fully determined).
 */
static int compute_jinc(char ty0, char ty1)
{
    const bool fully_smooth =
        ty0 == 'o' || ty1 == 'o' || ty0 == ']' || ty1 == '[';
    if (fully_smooth)
        return 4;
    if (ty0 == 'c' && ty1 == 'c')
        return 2;
    const bool corner_on_right =
        (ty0 == '{' || ty0 == 'v' || ty0 == '[') && ty1 == 'c';
    const bool corner_on_left =
        ty0 == 'c' && (ty1 == '}' || ty1 == 'v' || ty1 == ']');
    if (corner_on_right || corner_on_left)
        return 1;
    return 0;
}
625
626static int count_vec(const spiro_seg *s, int nseg)
627{
628 int i;
629 int n = 0;
630
631 for (i = 0; i < nseg; i++)
632 n += compute_jinc(s[i].ty, s[i + 1].ty);
633 return n;
634}
635
/*
 * Accumulate one constraint into the band system: add residual x into
 * v[jj] and the weighted derivative row y * derivs[] into band row jj.
 * joff maps global column j into the row's local band offset; the
 * nmat < 6 and nmat == 6 cases adjust the wrap-around for very small
 * systems. jj < 0 means "no constraint here" and is a no-op.
 */
static void
add_mat_line(bandmat *m, double *v,
             double derivs[4], double x, double y, int j, int jj, int jinc,
             int nmat)
{
    if (jj >= 0) {
        int joff = (j + 5 - jj + nmat) % nmat;
        if (nmat < 6) {
            joff = j + 5 - jj;
        } else if (nmat == 6) {
            joff = 2 + (j + 3 - jj + nmat) % nmat;
        }
#ifdef VERBOSE
        printf("add_mat_line j=%d jj=%d jinc=%d nmat=%d joff=%d\n", j, jj, jinc, nmat, joff);
#endif
        v[jj] += x;
        for (int k = 0; k < jinc; k++)
            m[jj].a[joff + k] += y * derivs[k];
    }
}
656
/*
 * One Newton iteration of the spiro solver: assemble the band-diagonal
 * linearized constraint system for all n segments, solve it, and apply
 * the correction to every segment's curvature coefficients.
 * Returns the squared norm of the correction (convergence measure).
 */
static double
spiro_iter(spiro_seg *s, bandmat *m, int *perm, double *v, const int n)
{
    int cyclic = s[0].ty != '{' && s[0].ty != 'v';
    int nmat = count_vec(s, n);
    int n_invert;

    /* Clear the matrix rows and the residual vector. */
    for (int i = 0; i < nmat; i++) {
        v[i] = 0.;
        for (double & j : m[i].a) {
            j = 0.;
        }
        for (double & j : m[i].al) {
            j = 0.;
        }
    }

    int j = 0;   /* global column (unknown) index */
    int jj;      /* current constraint row index */
    /* The first knot's crossing constraints wrap to the end of the system. */
    if (s[0].ty == 'o') {
        jj = nmat - 2;
    } else if (s[0].ty == 'c') {
        jj = nmat - 1;
    } else {
        jj = 0;
    }
    for (int i = 0; i < n; i++) {
        char ty0 = s[i].ty;
        char ty1 = s[i + 1].ty;
        int jinc = compute_jinc(ty0, ty1);
        double th = s[i].bend_th;
        double ends[2][4];
        double derivs[4][2][4];
        /* Row indices for theta/k0/k1/k2 continuity at the left (l) and
           right (r) ends; -1 = constraint not present. */
        int jthl = -1, jk0l = -1, jk1l = -1, jk2l = -1;
        int jthr = -1, jk0r = -1, jk1r = -1, jk2r = -1;

        compute_pderivs(&s[i], ends, derivs, jinc);

        /* constraints crossing left */
        if (ty0 == 'o' || ty0 == 'c' || ty0 == '[' || ty0 == ']') {
            jthl = jj++;
            jj %= nmat;
            jk0l = jj++;
        }
        if (ty0 == 'o') {
            jj %= nmat;
            jk1l = jj++;
            jk2l = jj++;
        }

        /* constraints on left */
        if ((ty0 == '[' || ty0 == 'v' || ty0 == '{' || ty0 == 'c') &&
            jinc == 4) {
            if (ty0 != 'c')
                jk1l = jj++;
            jk2l = jj++;
        }

        /* constraints on right */
        if ((ty1 == ']' || ty1 == 'v' || ty1 == '}' || ty1 == 'c') &&
            jinc == 4) {
            if (ty1 != 'c')
                jk1r = jj++;
            jk2r = jj++;
        }

        /* constraints crossing right */
        if (ty1 == 'o' || ty1 == 'c' || ty1 == '[' || ty1 == ']') {
            jthr = jj;
            jk0r = (jj + 1) % nmat;
        }
        if (ty1 == 'o') {
            jk1r = (jj + 2) % nmat;
            jk2r = (jj + 3) % nmat;
        }

        add_mat_line(m, v, derivs[0][0], th - ends[0][0], 1, j, jthl, jinc, nmat);
        add_mat_line(m, v, derivs[1][0], ends[0][1], -1, j, jk0l, jinc, nmat);
        add_mat_line(m, v, derivs[2][0], ends[0][2], -1, j, jk1l, jinc, nmat);
        add_mat_line(m, v, derivs[3][0], ends[0][3], -1, j, jk2l, jinc, nmat);
        add_mat_line(m, v, derivs[0][1], -ends[1][0], 1, j, jthr, jinc, nmat);
        add_mat_line(m, v, derivs[1][1], -ends[1][1], 1, j, jk0r, jinc, nmat);
        add_mat_line(m, v, derivs[2][1], -ends[1][2], 1, j, jk1r, jinc, nmat);
        add_mat_line(m, v, derivs[3][1], -ends[1][3], 1, j, jk2r, jinc, nmat);
        /* Angle residuals are reduced to the principal range. */
        if (jthl >= 0)
            v[jthl] = mod_2pi(v[jthl]);
        if (jthr >= 0)
            v[jthr] = mod_2pi(v[jthr]);
        j += jinc;
    }
    if (cyclic) {
        /* Closed path: replicate the system three times and solve the
           middle copy so wrap-around coupling is handled correctly. */
        memcpy(m + nmat, m, sizeof(bandmat) * nmat);
        memcpy(m + 2 * nmat, m, sizeof(bandmat) * nmat);
        memcpy(v + nmat, v, sizeof(double) * nmat);
        memcpy(v + 2 * nmat, v, sizeof(double) * nmat);
        n_invert = 3 * nmat;
        j = nmat;
    } else {
        n_invert = nmat;
        j = 0;
    }
#ifdef VERBOSE
    for (int i = 0; i < n; i++) {
        for (int k = 0; k < 11; k++) {
            printf(" %2.4f", m[i].a[k]);
        }
        printf(": %2.4f\n", v[i]);
    }
    printf("---\n");
#endif
    bandec11(m, perm, n_invert);
    banbks11(m, perm, v, n_invert);

    /* Apply the Newton step and accumulate its squared norm. */
    double norm = 0.;
    for (int i = 0; i < n; i++) {
        char ty0 = s[i].ty;
        char ty1 = s[i + 1].ty;
        int jinc = compute_jinc(ty0, ty1);
        int k;

        for (k = 0; k < jinc; k++) {
            double dk = v[j++];

#ifdef VERBOSE
            printf("s[%d].ks[%d] += %f\n", i, k, dk);
#endif
            s[i].ks[k] += dk;
            norm += dk * dk;
        }
        /* Keep the winding angle k0 in a canonical (-2pi, 2pi] range. */
        s[i].ks[0] = 2.0*mod_2pi(s[i].ks[0]/2.0);
    }
    return norm;
}
790
791static int
792solve_spiro(spiro_seg *s, const int nseg)
793{
794 int nmat = count_vec(s, nseg);
795 int n_alloc = nmat;
796
797 if (nmat == 0) {
798 return 0;
799 }
800 if (s[0].ty != '{' && s[0].ty != 'v') {
801 n_alloc *= 3;
802 }
803 if (n_alloc < 5) {
804 n_alloc = 5;
805 }
806
807 bandmat *m = (bandmat *)malloc(sizeof(bandmat) * n_alloc);
808 double *v = (double *)malloc(sizeof(double) * n_alloc);
809 int *perm = (int *)malloc(sizeof(int) * n_alloc);
810
811 for (unsigned i = 0; i < 10; i++) {
812 double norm = spiro_iter(s, m, perm, v, nseg);
813#ifdef VERBOSE
814 printf("%% norm = %g\n", norm);
815#endif
816 if (norm < 1e-12) break;
817 }
818
819 free(m);
820 free(v);
821 free(perm);
822 return 0;
823}
824
/*
 * Emit one solved spiro segment into the converter bc as a line or a
 * cubic Bezier, recursively bisecting the spiral while its total bend is
 * large. close_last is forwarded so the converter can close the path on
 * the very last emitted piece.
 */
static void
spiro_seg_to_otherpath(const double ks[4],
                       double x0, double y0, double x1, double y1,
                       ConverterPath &bc, int depth, bool close_last)
{
    /* Cheap upper bound on the total turning over the segment. */
    double bend = fabs(ks[0]) + fabs(.5 * ks[1]) + fabs(.125 * ks[2]) +
        fabs((1./48) * ks[3]);

    /* Negated comparison so a NaN bend also falls into the line case. */
    if (!(bend > 1e-8)) {
        bc.lineto(x1, y1, close_last);
    } else {
        double seg_ch = hypot(x1 - x0, y1 - y0);
        double seg_th = atan2(y1 - y0, x1 - x0);
        double xy[2];
        double ch, th;
        double scale, rot;

        /* Map the unit-domain spiral onto the actual chord. */
        integrate_spiro(ks, xy);
        ch = hypot(xy[0], xy[1]);
        th = atan2(xy[1], xy[0]);
        scale = seg_ch / ch;
        rot = seg_th - th;
        if (depth > 5 || bend < 1.) {
            /* Gentle enough (or recursion-limited): one cubic whose control
               arms follow the end tangents at 1/3 of the scaled chord. */
            double ul, vl;
            double ur, vr;
            double th_even, th_odd;
            th_even = (1./384) * ks[3] + (1./8) * ks[1] + rot;
            th_odd = (1./48) * ks[2] + .5 * ks[0];
            ul = (scale * (1./3)) * cos(th_even - th_odd);
            vl = (scale * (1./3)) * sin(th_even - th_odd);
            ur = (scale * (1./3)) * cos(th_even + th_odd);
            vr = (scale * (1./3)) * sin(th_even + th_odd);
            bc.curveto(x0 + ul, y0 + vl, x1 - ur, y1 - vr, x1, y1, close_last);
        } else {
            /* subdivide */
            double ksub[4];
            double thsub;
            double xysub[2];
            double xmid, ymid;
            double cth, sth;

            /* Coefficients of the first half re-parameterized to [-.5, .5]. */
            ksub[0] = .5 * ks[0] - .125 * ks[1] + (1./64) * ks[2] - (1./768) * ks[3];
            ksub[1] = .25 * ks[1] - (1./16) * ks[2] + (1./128) * ks[3];
            ksub[2] = .125 * ks[2] - (1./32) * ks[3];
            ksub[3] = (1./16) * ks[3];
            thsub = rot - .25 * ks[0] + (1./32) * ks[1] - (1./384) * ks[2] + (1./6144) * ks[3];
            cth = .5 * scale * cos(thsub);
            sth = .5 * scale * sin(thsub);
            integrate_spiro(ksub, xysub);
            xmid = x0 + cth * xysub[0] - sth * xysub[1];
            ymid = y0 + cth * xysub[1] + sth * xysub[0];
            spiro_seg_to_otherpath(ksub, x0, y0, xmid, ymid, bc, depth + 1, false);
            /* Shift the coefficients to describe the second half. */
            ksub[0] += .25 * ks[1] + (1./384) * ks[3];
            ksub[1] += .125 * ks[2];
            ksub[2] += (1./16) * ks[3];
            spiro_seg_to_otherpath(ksub, xmid, ymid, x1, y1, bc, depth + 1, close_last);
        }
    }
}
884
885spiro_seg *
886run_spiro(const spiro_cp *src, int n)
887{
888 int nseg = src[0].ty == '{' ? n - 1 : n;
889 spiro_seg *s = setup_path(src, n);
890 if (nseg > 1)
891 solve_spiro(s, nseg);
892 return s;
893}
894
895void
897{
898 free(s);
899}
900
901void
903{
904 int nsegs = s[n - 1].ty == '}' ? n - 1 : n;
905
906 for (int i = 0; i < nsegs; i++) {
907 double x0 = s[i].x;
908 double y0 = s[i].y;
909 double x1 = s[i + 1].x;
910 double y1 = s[i + 1].y;
911
912 if (i == 0) {
913 bc.moveto(x0, y0);
914 }
915 // on the last segment, set the 'close_last' flag if path is closed
916 spiro_seg_to_otherpath(s[i].ks, x0, y0, x1, y1, bc, 0, nsegs == n && i == n - 1);
917 }
918}
919
920double
921get_knot_th(const spiro_seg *s, int i)
922{
923 double ends[2][4];
924
925 if (i == 0) {
926 compute_ends(s[i].ks, ends, s[i].seg_ch);
927 return s[i].seg_th - ends[0][0];
928 } else {
929 compute_ends(s[i - 1].ks, ends, s[i - 1].seg_ch);
930 return s[i - 1].seg_th + ends[1][0];
931 }
932}
933
934
935} // namespace Spiro
936
937/************************************
938 * Unit_test code
939 */
940
941
942#ifdef UNIT_TEST
943#include <stdio.h>
944#include <sys/time.h> /* for gettimeofday */
945
946using namespace Spiro;
947
/* Wall-clock time in seconds (microsecond resolution), for benchmarking.
   (Signature line restored -- lost in the HTML extraction.) */
static double
get_time(void)
{
    struct timeval tv;
    struct timezone tz;

    gettimeofday (&tv, &tz);

    return tv.tv_sec + 1e-6 * tv.tv_usec;
}
958
959int
961 double ks[] = {1, 2, 3, 4};
962 double xy[2];
963 double xynom[2];
964 int i, j;
965 int nsubdiv;
966
967 n = ORDER < 6 ? 4096 : 1024;
968 integrate_spiro(ks, xynom);
969 nsubdiv = ORDER < 12 ? 8 : 7;
970 for (i = 0; i < nsubdiv; i++) {
971 double st, en;
972 double err;
973 int n_iter = (1 << (20 - i));
974
975 n = 1 << i;
976 st = get_time();
977 for (j = 0; j < n_iter; j++)
978 integrate_spiro(ks, xy);
979 en = get_time();
980 err = hypot(xy[0] - xynom[0], xy[1] - xynom[1]);
981 printf("%d %d %g %g\n", ORDER, n, (en - st) / n_iter, err);
982#if 0
983 double ch, th;
984 ch = hypot(xy[0], xy[1]);
985 th = atan2(xy[1], xy[0]);
986 printf("n = %d: integ(%g %g %g %g) = %g %g, ch = %g, th = %g\n", n,
987 ks[0], ks[1], ks[2], ks[3], xy[0], xy[1], ch, th);
988 printf("%d: %g %g\n", n, xy[0] - xynom[0], xy[1] - xynom[1]);
989#endif
990 }
991 return 0;
992}
993
/*
 * UNIT_TEST twin of spiro_seg_to_otherpath(): print one segment as
 * PostScript lineto/curveto commands, subdividing while the bend is
 * large. Note: unlike spiro_seg_to_otherpath there is no recursion depth
 * limit here.
 */
void
print_seg(const double ks[4], double x0, double y0, double x1, double y1)
{
    /* Cheap upper bound on the total turning over the segment. */
    double bend = fabs(ks[0]) + fabs(.5 * ks[1]) + fabs(.125 * ks[2]) +
        fabs((1./48) * ks[3]);

    if (bend < 1e-8) {
        printf("%g %g lineto\n", x1, y1);
    } else {
        double seg_ch = hypot(x1 - x0, y1 - y0);
        double seg_th = atan2(y1 - y0, x1 - x0);
        double xy[2];
        double ch, th;
        double scale, rot;

        /* Map the unit-domain spiral onto the actual chord. */
        integrate_spiro(ks, xy);
        ch = hypot(xy[0], xy[1]);
        th = atan2(xy[1], xy[0]);
        scale = seg_ch / ch;
        rot = seg_th - th;
        if (bend < 1.) {
            /* Gentle: emit one cubic with 1/3-chord tangent arms. */
            double th_even, th_odd;
            double ul, vl;
            double ur, vr;
            th_even = (1./384) * ks[3] + (1./8) * ks[1] + rot;
            th_odd = (1./48) * ks[2] + .5 * ks[0];
            ul = (scale * (1./3)) * cos(th_even - th_odd);
            vl = (scale * (1./3)) * sin(th_even - th_odd);
            ur = (scale * (1./3)) * cos(th_even + th_odd);
            vr = (scale * (1./3)) * sin(th_even + th_odd);
            printf("%g %g %g %g %g %g curveto\n",
                   x0 + ul, y0 + vl, x1 - ur, y1 - vr, x1, y1);

        } else {
            /* subdivide */
            double ksub[4];
            double thsub;
            double xysub[2];
            double xmid, ymid;
            double cth, sth;

            /* Coefficients of the first half re-parameterized to [-.5, .5]. */
            ksub[0] = .5 * ks[0] - .125 * ks[1] + (1./64) * ks[2] - (1./768) * ks[3];
            ksub[1] = .25 * ks[1] - (1./16) * ks[2] + (1./128) * ks[3];
            ksub[2] = .125 * ks[2] - (1./32) * ks[3];
            ksub[3] = (1./16) * ks[3];
            thsub = rot - .25 * ks[0] + (1./32) * ks[1] - (1./384) * ks[2] + (1./6144) * ks[3];
            cth = .5 * scale * cos(thsub);
            sth = .5 * scale * sin(thsub);
            integrate_spiro(ksub, xysub);
            xmid = x0 + cth * xysub[0] - sth * xysub[1];
            ymid = y0 + cth * xysub[1] + sth * xysub[0];
            print_seg(ksub, x0, y0, xmid, ymid);
            /* Shift the coefficients to describe the second half. */
            ksub[0] += .25 * ks[1] + (1./384) * ks[3];
            ksub[1] += .125 * ks[2];
            ksub[2] += (1./16) * ks[3];
            print_seg(ksub, xmid, ymid, x1, y1);
        }
    }
}
1053
1054void
1055print_segs(const spiro_seg *segs, int nsegs)
1056{
1057 int i;
1058
1059 for (i = 0; i < nsegs; i++) {
1060 double x0 = segs[i].x;
1061 double y0 = segs[i].y;
1062 double x1 = segs[i + 1].x;
1063 double y1 = segs[i + 1].y;
1064
1065 if (i == 0)
1066 printf("%g %g moveto\n", x0, y0);
1067 printf("%% ks = [ %g %g %g %g ]\n",
1068 segs[i].ks[0], segs[i].ks[1], segs[i].ks[2], segs[i].ks[3]);
1069 print_seg(segs[i].ks, x0, y0, x1, y1);
1070 }
1071 printf("stroke\n");
1072}
1073
1074int
1076{
1077 spiro_cp path[] = {
1078 {334, 117, 'v'},
1079 {305, 176, 'v'},
1080 {212, 142, 'c'},
1081 {159, 171, 'c'},
1082 {224, 237, 'c'},
1083 {347, 335, 'c'},
1084 {202, 467, 'c'},
1085 {81, 429, 'v'},
1086 {114, 368, 'v'},
1087 {201, 402, 'c'},
1088 {276, 369, 'c'},
1089 {218, 308, 'c'},
1090 {91, 211, 'c'},
1091 {124, 111, 'c'},
1092 {229, 82, 'c'}
1093 };
1094 spiro_seg *segs;
1095 int i;
1096
1097 n = 1;
1098 for (i = 0; i < 1000; i++) {
1099 segs = setup_path(path, 15);
1100 solve_spiro(segs, 15);
1101 }
1102 printf("100 800 translate 1 -1 scale 1 setlinewidth\n");
1103 print_segs(segs, 15);
1104 printf("showpage\n");
1105 return 0;
1106}
1107
/* UNIT_TEST entry point: runs the curve benchmark/demo
   (swap in test_integ() to benchmark the integrator instead). */
int main(int argc, char **argv)
{
    return test_curve();
}
1112#endif
double scale
Definition aa.cpp:228
int main()
Sequence of contiguous curves, aka spline.
Definition path.h:353
Converts Spiro to 2Geom's Path.
void curveto(double x1, double y1, double x2, double y2, double x3, double y3, bool close_last)
void lineto(double x, double y, bool close_last)
void moveto(double x, double y)
Piecewise< D2< SBasis > > bend(Piecewise< D2< SBasis > > const &f, Piecewise< SBasis > bending)
Definition hatches.cpp:255
auto floor(Geom::Rect const &rect)
Definition geom.h:131
void free_spiro(spiro_seg *s)
Definition spiro.cpp:896
double get_knot_th(const spiro_seg *s, int i)
Definition spiro.cpp:921
void spiro_to_otherpath(const spiro_seg *s, int n, ConverterPath &bc)
Definition spiro.cpp:902
static double mod_2pi(double th)
Definition spiro.cpp:489
struct spiro_seg_s spiro_seg
Definition spiro.h:29
static spiro_seg * setup_path(const spiro_cp *src, int n)
Definition spiro.cpp:496
int n
Definition spiro.cpp:57
static void add_mat_line(bandmat *m, double *v, double derivs[4], double x, double y, int j, int jj, int jinc, int nmat)
Definition spiro.cpp:637
static double spiro_iter(spiro_seg *s, bandmat *m, int *perm, double *v, const int n)
Definition spiro.cpp:658
Geom::Path spiro_run(const spiro_cp *src, int src_len)
Definition spiro.cpp:23
static int count_vec(const spiro_seg *s, int nseg)
Definition spiro.cpp:626
spiro_seg * run_spiro(const spiro_cp *src, int n)
Definition spiro.cpp:886
static void integrate_spiro(const double ks[4], double xy[2])
Definition spiro.cpp:65
static void spiro_seg_to_otherpath(const double ks[4], double x0, double y0, double x1, double y1, ConverterPath &bc, int depth, bool close_last)
Definition spiro.cpp:826
static void compute_pderivs(const spiro_seg *s, double ends[2][4], double derivs[4][2][4], int jinc)
Definition spiro.cpp:467
static void bandec11(bandmat *m, int *perm, int n)
Definition spiro.cpp:535
static void banbks11(const bandmat *m, const int *perm, double *v, int n)
Definition spiro.cpp:583
static int compute_jinc(char ty0, char ty1)
Definition spiro.cpp:612
static double compute_ends(const double ks[4], double ends[2][4], double seg_ch)
Definition spiro.cpp:429
static int solve_spiro(spiro_seg *s, const int nseg)
Definition spiro.cpp:792
void print_seg(const double ks[4], double x0, double y0, double x1, double y1)
Definition spiro.cpp:995
void print_segs(const spiro_seg *segs, int nsegs)
Definition spiro.cpp:1055
int test_curve(void)
Definition spiro.cpp:1075
static double get_time(void)
Definition spiro.cpp:949
int test_integ(void)
Definition spiro.cpp:960
C implementation of third-order polynomial spirals.
double y
Definition spiro.h:22
double x
Definition spiro.h:21
int delta