Solution #73be1f5e-00b3-4095-a4fb-5974226996b0
Status
completed
Score
49% (0/5)
Runtime
111.46ms
Delta
+153.3% vs parent
-49.7% vs best
Improved from parent
def solve(input):
    try:
        data = input.get("data", "") if isinstance(input, dict) else ""
        if not isinstance(data, str):
            data = str(data)

        n = len(data)
        if n == 0:
            return 0.0

        # Novel approach: exact smallest straight-line expression over substrings
        # using:
        #   - literal substrings
        #   - concatenation
        #   - repetition: k * pattern
        #
        # Cost model counts encoded characters in a textual representation:
        #   literal substring costs its length
        #   concatenation adds no overhead (segments placed sequentially)
        #   repetition costs digits(k) + subexpr_cost
        #
        # This is intentionally aggressive on periodic strings like:
        #   "aaaa...."  -> "100a"
        #   "abcabcabc" -> "3abc"
        # while remaining lossless and verified by reconstruction.

        s = data

        # divisors cache for repetition testing
        divisors = [[] for _ in range(n + 1)]
        for L in range(1, n + 1):
            ds = []
            r = int(L ** 0.5)
            for d in range(1, r + 1):
                if L % d == 0:
                    if d < L:
                        ds.append(d)
                    e = L // d
                    if e != d and e < L:
                        ds.append(e)
            ds.sort()
            divisors[L] = ds

        digit_len = [0] * (n + 1)
        for i in range(1, n + 1):
            digit_len[i] = len(str(i))

        dp = [[0] * n for _ in range(n)]    # minimal encoded size
        kind = [[0] * n for _ in range(n)]  # 0 literal, 1 split, 2 repeat
        meta = [[0] * n for _ in range(n)]  # split index or unit length

        for L in range(1, n + 1):
            for i in range(0, n - L + 1):
                j = i + L - 1

                best = L  # literal
                best_kind = 0
                best_meta = 0

                # concatenation
                row = dp[i]
                for k in range(i, j):
                    c = row[k] + dp[k + 1][j]
                    if c < best:
                        best = c
                        best_kind = 1
                        best_meta = k

                # repetition: substring equals smaller block repeated
                sub = s[i:j + 1]
                for p in divisors[L]:
                    reps = L // p
                    unit = sub[:p]
                    if unit * reps == sub:
                        c = digit_len[reps] + dp[i][i + p - 1]
                        if c < best:
                            best = c
                            best_kind = 2
                            best_meta = p
                        # smallest p often best; can break on perfect tiny pattern
                        if best <= digit_len[reps] + 1:
                            break

                dp[i][j] = best
                kind[i][j] = best_kind
                meta[i][j] = best_meta

        def build(i, j):
            t = kind[i][j]
            if t == 0:
                return s[i:j + 1]
            if t == 1:
                k = meta[i][j]
                return build(i, k) + build(k + 1, j)
            p = meta[i][j]
            reps = (j - i + 1) // p
            return build(i, i + p - 1) * reps

        rebuilt = build(0, n - 1)
        if rebuilt != s:
            return 999.0

        return float(dp[0][n - 1] / n)
    except:
        return 999.0

Score Difference
-48.0%
Runtime Advantage
111.33ms slower
Code Size
105 vs 34 lines
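
For reference, a minimal driver (hypothetical, not part of the submitted solution) that exercises the cost model described in the code comments above; the expected values follow directly from that model:

# Assumes solve() from the solution listing above is in scope.
if __name__ == "__main__":
    print(solve({"data": "abc" * 3}))   # "3abc": (1 + 3) / 9 ≈ 0.444
    print(solve({"data": "a" * 100}))   # "100a": (3 + 1) / 100 = 0.04
    print(solve({"data": "xyz"}))       # no structure, stays literal: 3 / 3 = 1.0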
Comparison with champion (your 105-line solution is listed in full above; the champion's 34-line solution follows):

def solve(input):
    data = input.get("data", "")
    if not isinstance(data, str) or not data:
        return 999.0

    # Mathematical/analytical approach: Entropy-based redundancy calculation

    from collections import Counter
    from math import log2

    def entropy(s):
        probabilities = [freq / len(s) for freq in Counter(s).values()]
        return -sum(p * log2(p) if p > 0 else 0 for p in probabilities)

    def redundancy(s):
        max_entropy = log2(len(set(s))) if len(set(s)) > 1 else 0
        actual_entropy = entropy(s)
        return max_entropy - actual_entropy

    # Calculate reduction in size possible based on redundancy
    reduction_potential = redundancy(data)

    # Assuming compression is achieved based on redundancy
    max_possible_compression_ratio = 1.0 - (reduction_potential / log2(len(data)))

    # Qualitative check if max_possible_compression_ratio makes sense
    if max_possible_compression_ratio < 0.0 or max_possible_compression_ratio > 1.0:
        return 999.0

    # Verify compression is lossless (hypothetical check here)
    # Normally, if we had a compression algorithm, we'd test decompress(compress(data)) == data

    # Returning the hypothetical compression performance
    return max_possible_compression_ratio
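
For intuition, a short standalone sketch (mirroring the champion's entropy helper above; the numeric values are standard Shannon-entropy results) showing what drives its redundancy estimate:

from collections import Counter
from math import log2

def entropy(s):
    # Shannon entropy in bits per symbol, as in the champion's helper
    probabilities = [freq / len(s) for freq in Counter(s).values()]
    return -sum(p * log2(p) for p in probabilities)

print(entropy("abab"))  # 1.0 -> one bit per symbol, highly redundant
print(entropy("abcd"))  # 2.0 -> maximal entropy for a 4-symbol alphabet, no redundancy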