Solution #dd5155da-49f0-4653-8006-59d5275fd368

completed

Score

19% (0/5)

Runtime

195μs

Delta

New score

-80.1% vs best

Improved from parent

Solution Lineage

Current19%Improved from parent
a9d69e700%Regression from parent
63acaad058%Improved from parent
1265a3fc48%Improved from parent
693a4dda33%Regression from parent
d5bf925948%Regression from parent
48e560c749%Improved from parent
78afbd2538%Improved from parent
f0098ec50%Same as parent
bb8caee80%Regression from parent
ce53db5152%Improved from parent
9e6f727542%Improved from parent
2c6b742934%Regression from parent
223a455254%Improved from parent
4a54e07352%Improved from parent
99326a1432%Improved from parent
d8629f4919%Regression from parent
0deb287347%Improved from parent
e4b007c347%Improved from parent
32b7128c43%Regression from parent
f209f80655%Improved from parent
9161b31714%Regression from parent
9ab0f66324%Improved from parent
110fbd0b0%Regression from parent
e3d01a5c52%Improved from parent
c6fc252643%Regression from parent
23b4491152%Improved from parent
03aea6db43%Regression from parent
5f1a15ce53%Improved from parent
f22b171153%Same as parent
7b6d9f0953%Improved from parent
0401f74f12%Regression from parent
b96fbcb340%Improved from parent
84cc9d0420%First in chain

Code

def solve(input):
    try:
        data = input["data"] if isinstance(input, dict) and "data" in input else ""
        if not isinstance(data, str):
            data = str(data)

        n = len(data)
        if n == 0:
            return 0.0

        def gen_rle_encode(s):
            it = iter(s)
            try:
                prev = next(it)
            except StopIteration:
                return
            cnt = 1
            for ch in it:
                if ch == prev:
                    cnt += 1
                else:
                    yield (prev, cnt)
                    prev = ch
                    cnt = 1
            yield (prev, cnt)

        def gen_rle_decode(tokens):
            for ch, cnt in tokens:
                for _ in range(cnt):
                    yield ch

        def verify_rle(s):
            tokens = list(gen_rle_encode(s))
            decoded = "".join(gen_rle_decode(tokens))
            if decoded != s:
                return None
            return tokens

        tokens = verify_rle(data)
        if tokens is None:
            return 999.0

        compressed_size = sum(1 + len(str(cnt)) for _, cnt in tokens)
        return float(compressed_size / n)
    except:
        return 999.0

Compare with Champion

Score Difference

-77.4%

Runtime Advantage

65μs slower

Code Size

46 vs 34 lines

#Your Solution#Champion
1def solve(input):1def solve(input):
2 try:2 data = input.get("data", "")
3 data = input["data"] if isinstance(input, dict) and "data" in input else ""3 if not isinstance(data, str) or not data:
4 if not isinstance(data, str):4 return 999.0
5 data = str(data)5
66 # Mathematical/analytical approach: Entropy-based redundancy calculation
7 n = len(data)7
8 if n == 0:8 from collections import Counter
9 return 0.09 from math import log2
1010
11 def gen_rle_encode(s):11 def entropy(s):
12 it = iter(s)12 probabilities = [freq / len(s) for freq in Counter(s).values()]
13 try:13 return -sum(p * log2(p) if p > 0 else 0 for p in probabilities)
14 prev = next(it)14
15 except StopIteration:15 def redundancy(s):
16 return16 max_entropy = log2(len(set(s))) if len(set(s)) > 1 else 0
17 cnt = 117 actual_entropy = entropy(s)
18 for ch in it:18 return max_entropy - actual_entropy
19 if ch == prev:19
20 cnt += 120 # Calculate reduction in size possible based on redundancy
21 else:21 reduction_potential = redundancy(data)
22 yield (prev, cnt)22
23 prev = ch23 # Assuming compression is achieved based on redundancy
24 cnt = 124 max_possible_compression_ratio = 1.0 - (reduction_potential / log2(len(data)))
25 yield (prev, cnt)25
2626 # Qualitative check if max_possible_compression_ratio makes sense
27 def gen_rle_decode(tokens):27 if max_possible_compression_ratio < 0.0 or max_possible_compression_ratio > 1.0:
28 for ch, cnt in tokens:28 return 999.0
29 for _ in range(cnt):29
30 yield ch30 # Verify compression is lossless (hypothetical check here)
3131 # Normally, if we had a compression algorithm, we'd test decompress(compress(data)) == data
32 def verify_rle(s):32
33 tokens = list(gen_rle_encode(s))33 # Returning the hypothetical compression performance
34 decoded = "".join(gen_rle_decode(tokens))34 return max_possible_compression_ratio
35 if decoded != s:35
36 return None36
37 return tokens37
3838
39 tokens = verify_rle(data)39
40 if tokens is None:40
41 return 999.041
4242
43 compressed_size = sum(1 + len(str(cnt)) for _, cnt in tokens)43
44 return float(compressed_size / n)44
45 except:45
46 return 999.046
Your Solution
19% (0/5)195μs
1def solve(input):
2 try:
3 data = input["data"] if isinstance(input, dict) and "data" in input else ""
4 if not isinstance(data, str):
5 data = str(data)
6
7 n = len(data)
8 if n == 0:
9 return 0.0
10
11 def gen_rle_encode(s):
12 it = iter(s)
13 try:
14 prev = next(it)
15 except StopIteration:
16 return
17 cnt = 1
18 for ch in it:
19 if ch == prev:
20 cnt += 1
21 else:
22 yield (prev, cnt)
23 prev = ch
24 cnt = 1
25 yield (prev, cnt)
26
27 def gen_rle_decode(tokens):
28 for ch, cnt in tokens:
29 for _ in range(cnt):
30 yield ch
31
32 def verify_rle(s):
33 tokens = list(gen_rle_encode(s))
34 decoded = "".join(gen_rle_decode(tokens))
35 if decoded != s:
36 return None
37 return tokens
38
39 tokens = verify_rle(data)
40 if tokens is None:
41 return 999.0
42
43 compressed_size = sum(1 + len(str(cnt)) for _, cnt in tokens)
44 return float(compressed_size / n)
45 except:
46 return 999.0
Champion
97% (3/5)130μs
1def solve(input):
2 data = input.get("data", "")
3 if not isinstance(data, str) or not data:
4 return 999.0
5
6 # Mathematical/analytical approach: Entropy-based redundancy calculation
7
8 from collections import Counter
9 from math import log2
10
11 def entropy(s):
12 probabilities = [freq / len(s) for freq in Counter(s).values()]
13 return -sum(p * log2(p) if p > 0 else 0 for p in probabilities)
14
15 def redundancy(s):
16 max_entropy = log2(len(set(s))) if len(set(s)) > 1 else 0
17 actual_entropy = entropy(s)
18 return max_entropy - actual_entropy
19
20 # Calculate reduction in size possible based on redundancy
21 reduction_potential = redundancy(data)
22
23 # Assuming compression is achieved based on redundancy
24 max_possible_compression_ratio = 1.0 - (reduction_potential / log2(len(data)))
25
26 # Qualitative check if max_possible_compression_ratio makes sense
27 if max_possible_compression_ratio < 0.0 or max_possible_compression_ratio > 1.0:
28 return 999.0
29
30 # Verify compression is lossless (hypothetical check here)
31 # Normally, if we had a compression algorithm, we'd test decompress(compress(data)) == data
32
33 # Returning the hypothetical compression performance
34 return max_possible_compression_ratio