Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion codespeed/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,15 @@ class ProjectAdmin(admin.ModelAdmin):
class BranchAdmin(admin.ModelAdmin):
    """Admin for branches, with bulk actions to toggle whether the
    selected branches appear on the comparison page."""
    list_display = ('name', 'project', 'display_on_comparison_page')
    list_filter = ('project',)
    actions = ['enable_comparison_page', 'disable_comparison_page']

    def _set_comparison_visibility(self, queryset, visible):
        # Shared implementation for the two bulk actions below.
        queryset.update(display_on_comparison_page=visible)

    @admin.action(description='Display selected branches on comparison page')
    def enable_comparison_page(self, request, queryset):
        self._set_comparison_visibility(queryset, True)

    @admin.action(description='Hide selected branches from comparison page')
    def disable_comparison_page(self, request, queryset):
        self._set_comparison_visibility(queryset, False)


@admin.register(Revision)
Expand All @@ -58,7 +67,7 @@ class ExecutableAdmin(admin.ModelAdmin):

@admin.register(Benchmark)
class BenchmarkAdmin(admin.ModelAdmin):
list_display = ('name', 'benchmark_type', 'data_type', 'description',
list_display = ('name', 'source', 'data_type', 'description',
'units_title', 'units', 'lessisbetter',
'default_on_comparison')
list_filter = ('data_type', 'lessisbetter')
Expand Down
3 changes: 3 additions & 0 deletions codespeed/commits/git.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,9 +52,12 @@ def getlogs(endrev, startrev):
else:
logfmt = '--format=format:%h%x00%H%x00%at%x00%an%x00%ae%x00%s%x00%b%x1e'

max_log_entries = getattr(settings, 'GIT_MAX_LOG_ENTRIES', 30)

cmd = ["git", "log", logfmt]

if endrev.commitid != startrev.commitid:
cmd.append("-n%d" % max_log_entries)
cmd.append("%s...%s" % (startrev.commitid, endrev.commitid))
else:
cmd.append("-1") # Only return one commit
Expand Down
2 changes: 1 addition & 1 deletion codespeed/fixtures/jruby.json

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions codespeed/fixtures/testdata.json
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@
"fields": {
"name": "float",
"parent": null,
"benchmark_type": "C",
"source": "legacy",
"data_type": "U",
"description": "",
"units_title": "Time",
Expand All @@ -234,7 +234,7 @@
"fields": {
"name": "int",
"parent": null,
"benchmark_type": "C",
"source": "legacy",
"data_type": "U",
"description": "",
"units_title": "Time",
Expand Down
4 changes: 2 additions & 2 deletions codespeed/fixtures/timeline_tests.json
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@
"fields": {
"parent": null,
"name": "float",
"benchmark_type": "C",
"source": "legacy",
"default_on_comparison": true,
"units_title": "Time",
"units": "seconds",
Expand All @@ -224,7 +224,7 @@
"fields": {
"parent": null,
"name": "int",
"benchmark_type": "C",
"source": "legacy",
"default_on_comparison": true,
"units_title": "Time",
"units": "seconds",
Expand Down
36 changes: 36 additions & 0 deletions codespeed/migrations/0005_benchmark_source_result_suite_version.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from django.db import migrations, models


def remap_benchmark_type(apps, schema_editor):
    """Forward data migration: stamp every pre-existing benchmark row
    with the 'legacy' source value (the historical model is fetched via
    ``apps`` so this stays valid as the schema evolves)."""
    benchmark_model = apps.get_model('codespeed', 'Benchmark')
    benchmark_model.objects.all().update(source='legacy')


class Migration(migrations.Migration):
    """Rename ``Benchmark.benchmark_type`` to ``source``, remap its stored
    values, and add ``Result.suite_version``.

    Operation order matters: the column is renamed first, then widened to
    hold the new choice strings, and only then is the data remapped.
    """

    dependencies = [
        ('codespeed', '0004_branch_display_on_comparison_page'),
    ]

    operations = [
        # Keep the existing column data, just under the new name.
        migrations.RenameField(
            model_name='benchmark',
            old_name='benchmark_type',
            new_name='source',
        ),
        # Widen the column and install the new choice set;
        # 'pyperformance' is 14 characters, hence max_length=14.
        migrations.AlterField(
            model_name='benchmark',
            name='source',
            field=models.CharField(
                choices=[('legacy', 'Legacy'), ('pyperformance', 'PyPerformance')],
                default='legacy',
                max_length=14,
            ),
        ),
        # Data step: overwrite the old one-letter values with 'legacy'.
        # Reverse is a no-op — RenameField/AlterField reverse on their own.
        migrations.RunPython(remap_benchmark_type, migrations.RunPython.noop),
        migrations.AddField(
            model_name='result',
            name='suite_version',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
    ]
15 changes: 5 additions & 10 deletions codespeed/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,9 +172,9 @@ def __str__(self):


class Benchmark(models.Model):
B_TYPES = (
('C', 'Cross-project'),
('O', 'Own-project'),
S_TYPES = (
('legacy', 'Legacy'),
('pyperformance', 'PyPerformance'),
)
D_TYPES = (
('U', 'Mean'),
Expand All @@ -186,7 +186,7 @@ class Benchmark(models.Model):
'self', on_delete=models.CASCADE, verbose_name="parent",
help_text="allows to group benchmarks in hierarchies",
null=True, blank=True, default=None)
benchmark_type = models.CharField(max_length=1, choices=B_TYPES, default='C')
source = models.CharField(max_length=14, choices=S_TYPES, default='legacy')
data_type = models.CharField(max_length=1, choices=D_TYPES, default='U')
description = models.CharField(max_length=300, blank=True)
units_title = models.CharField(max_length=30, default='Time')
Expand All @@ -198,12 +198,6 @@ class Benchmark(models.Model):
def __str__(self):
return self.name

def clean(self):
if self.default_on_comparison and self.benchmark_type != 'C':
raise ValidationError("Only cross-project benchmarks are shown "
"on the comparison page. Deactivate "
"'default_on_comparison' first.")


class Environment(models.Model):
name = models.CharField(unique=True, max_length=100)
Expand All @@ -223,6 +217,7 @@ class Result(models.Model):
val_max = models.FloatField(blank=True, null=True)
q1 = models.FloatField(blank=True, null=True)
q3 = models.FloatField(blank=True, null=True)
suite_version = models.CharField(max_length=50, blank=True, default='')
date = models.DateTimeField(blank=True, null=True)
revision = models.ForeignKey(
Revision, on_delete=models.CASCADE, related_name="results")
Expand Down
3 changes: 3 additions & 0 deletions codespeed/results.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,8 @@ def save_result(data, update_repo=True):
b.units_title = data["units_title"]
if "lessisbetter" in data:
b.lessisbetter = data["lessisbetter"]
if "source" in data:
b.source = data["source"]
b.full_clean()
b.save()

Expand Down Expand Up @@ -127,6 +129,7 @@ def save_result(data, update_repo=True):
r.val_max = data.get('max')
r.q1 = data.get('q1')
r.q3 = data.get('q3')
r.suite_version = data.get('suite_version', '')

r.full_clean()
r.save()
Expand Down
2 changes: 2 additions & 0 deletions codespeed/static/css/main.css
Original file line number Diff line number Diff line change
Expand Up @@ -457,6 +457,8 @@ tbody.commits tr td.date {

a#permalink { float: right; font-size: small; }
a#permalink:hover { text-decoration: underline; }
a#exportcsv { float: right; font-size: small; margin-right: 1em; }
a#exportcsv:hover { text-decoration: underline; }

/* Plot styles */
div#plot { text-align: left; height: 500px; width: 100%; }
Expand Down
129 changes: 118 additions & 11 deletions codespeed/static/js/comparison.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ function getConfiguration() {
ben: readCheckbox("input[name='benchmarks']:checked"),
env: readCheckbox("input[name='environments']:checked"),
hor: $("input[name='direction']").is(':checked'),
bas: $("#baseline option:selected").val(),
bas: $("#baseline option:selected").val() || "none",
chart: $("#chart_type option:selected").val()
};
}
Expand Down Expand Up @@ -52,6 +52,22 @@ function refreshContent() {
msg = '<p class="warning">Normalized stacked bars actually represent the weighted arithmetic sum, useful to spot which individual benchmarks take up the most time. Choosing different weightings from the "Normalization" menu will change the totals relative to one another. For the correct way to calculate total bars, the geometric mean must be used (see <a href="http://portal.acm.org/citation.cfm?id=5666.5673 " title="How not to lie with statistics: the correct way to summarize benchmark results">paper</a>)</p>';
}

if (compdata && compdata.suite_versions) {
var mismatchedEnvs = enviros.filter(function(envId) {
var versions = new Set();
exes.forEach(function(exeKey) {
var sv = compdata.suite_versions[exeKey];
if (sv && sv[envId]) {
sv[envId].forEach(function(v) { versions.add(v); });
}
});
return versions.size > 1;
});
if (mismatchedEnvs.length > 0) {
msg += '<p class="warning">The executables being compared used different benchmark suite versions. Results may not be directly comparable.</p>';
}
}

chartInstances.forEach(function(c) { c.destroy(); });
chartInstances = [];
$("#plotwrapper").fadeOut("fast", function() {
Expand Down Expand Up @@ -90,10 +106,19 @@ function updateBaselineDropdown() {
var $baseline = $("#baseline");
var current = $baseline.val();
$baseline.find("option:not([value='none'])").remove();
var enviros = readCheckbox("input[name='environments']:checked").split(",").filter(Boolean);
var multiEnv = enviros.length > 1;
$("input[name='executables']:checked").each(function() {
var key = $(this).val();
var name = $(this).next('label').text().trim();
$baseline.append($('<option>').val(key).text(name));
if (multiEnv) {
enviros.forEach(function(envId) {
var envName = $("label[for='env_" + envId + "']").text().trim();
$baseline.append($('<option>').val(key + ':' + envId).text(name + ' @ ' + envName));
});
} else {
$baseline.append($('<option>').val(key).text(name));
}
});
if ($baseline.find("option[value='" + current + "']").length) {
$baseline.val(current);
Expand All @@ -104,7 +129,7 @@ function updateBaselineDropdown() {

function loadData() {
var conf = getConfiguration();
if (!conf.exe || !conf.ben) { return; }
if (!conf.exe || !conf.ben) { refreshContent(); return; }
var cacheKey = conf.exe + "|" + conf.ben;
if (dataCache[cacheKey]) {
compdata = dataCache[cacheKey];
Expand All @@ -119,7 +144,22 @@ function loadData() {
}

function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline, chart, horizontal) {
var baselineLabel = baseline !== "none" ? $("label[for='exe_" + baseline + "']").text().trim() : "";
// baseline may be "exe_key" or "exe_key:env_id" (for cross-env normalization)
if (!baseline) { baseline = "none"; }
var baselineExe = baseline, baselineEnv = null;
if (baseline !== "none" && baseline.indexOf(':') !== -1) {
var bparts = baseline.split(':');
baselineExe = bparts[0];
baselineEnv = bparts[1];
}

var baselineLabel = "";
if (baseline !== "none") {
baselineLabel = $("label[for='exe_" + baselineExe + "']").text().trim();
if (baselineEnv !== null) {
baselineLabel += ' @ ' + $("label[for='env_" + baselineEnv + "']").text().trim();
}
}

var title;
if (baseline === "none") {
Expand Down Expand Up @@ -148,15 +188,17 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
for (var i = 0; i < exes.length; i++) {
for (var j = 0; j < enviros.length; j++) {
var exeLabel = $("label[for='exe_" + exes[i] + "']").text().trim();
if (chart === "relative bars" && exes[i] === baseline) { continue; }
if (chart === "relative bars" && exes[i] === baselineExe &&
(baselineEnv === null || baselineEnv === enviros[j])) { continue; }
var data = [];
for (var b = 0; b < benchmarks.length; b++) {
var val = compdata[exes[i]] && compdata[exes[i]][enviros[j]]
? compdata[exes[i]][enviros[j]][benchmarks[b]]
: null;
if (val !== null && baseline !== "none") {
var baseval = compdata[baseline] && compdata[baseline][enviros[j]]
? compdata[baseline][enviros[j]][benchmarks[b]]
var envForBase = baselineEnv !== null ? baselineEnv : enviros[j];
var baseval = compdata[baselineExe] && compdata[baselineExe][envForBase]
? compdata[baselineExe][envForBase][benchmarks[b]]
: null;
val = (baseval === null || baseval === 0) ? null : val / baseval;
}
Expand Down Expand Up @@ -189,8 +231,9 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
? compdata[exes[i]][enviros[j]][benchmarks[b]]
: null;
if (val !== null && baseline !== "none") {
var baseval = compdata[baseline] && compdata[baseline][enviros[j]]
? compdata[baseline][enviros[j]][benchmarks[b]]
var envForBase = baselineEnv !== null ? baselineEnv : enviros[j];
var baseval = compdata[baselineExe] && compdata[baselineExe][envForBase]
? compdata[baselineExe][envForBase][benchmarks[b]]
: null;
val = (baseval === null || baseval === 0) ? null : val / baseval;
}
Expand All @@ -204,6 +247,11 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,

if (datasets.length === 0) { return; }

// Mark datasets where all values are null (baseline has no data for that env)
datasets.forEach(function(ds) {
ds.allNull = ds.data.every(function(v) { return v === null; });
});

// Size the container
var wrapWidth = $("#plotwrapper").width();
var h = horizontal
Expand Down Expand Up @@ -247,7 +295,19 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
font: {size: FONT_SIZE},
boxWidth: 20,
boxHeight: FONT_SIZE,
padding: 8
padding: 8,
generateLabels: function(chart) {
var items = Chart.defaults.plugins.legend.labels.generateLabels(chart);
items.forEach(function(item) {
var ds = chart.data.datasets[item.datasetIndex];
if (ds && ds.allNull) {
// Apply unicode combining strikethrough to each character
item.text = item.text.split('').join('\u0336') + '\u0336';
item.fontColor = '#aaa';
}
});
return items;
}
}
}
},
Expand Down Expand Up @@ -298,7 +358,11 @@ function init(defaults) {
$("#benchmark .checkall, #benchmark .uncheckall").click(loadData);

// Re-render without re-fetching for other controls
$("#chart_type, #baseline, #direction, input[name='environments']").change(refreshContent);
$("#chart_type, #baseline, #direction").change(refreshContent);
$("input[name='environments']").change(function() {
updateBaselineDropdown();
refreshContent();
});

$.ajaxSetup ({
cache: false
Expand All @@ -309,6 +373,49 @@ function init(defaults) {
$("#permalink").click(function() {
window.location = "?" + $.param(getConfiguration());
});

$("#exportcsv").click(function(e) {
e.preventDefault();
if (!compdata) { return; }
var conf = getConfiguration();
var exes = conf.exe ? conf.exe.split(",").filter(Boolean) : [];
var enviros = readCheckbox("input[name='environments']:checked").split(",").filter(Boolean);
var benchmarks = conf.ben ? conf.ben.split(",").filter(Boolean) : [];

// Header row: benchmark, then one column per exe@env
var header = ["benchmark"];
for (var i = 0; i < exes.length; i++) {
for (var j = 0; j < enviros.length; j++) {
var exeLabel = $("label[for='exe_" + exes[i] + "']").text().trim();
var envLabel = $("label[for='env_" + enviros[j] + "']").text().trim();
header.push(enviros.length > 1 ? exeLabel + "@" + envLabel : exeLabel);
}
}

var rows = [header];
for (var b = 0; b < benchmarks.length; b++) {
var benchLabel = $("label[for='benchmark_" + benchmarks[b] + "']").text().trim();
var row = [benchLabel];
for (var i = 0; i < exes.length; i++) {
for (var j = 0; j < enviros.length; j++) {
var val = compdata[exes[i]] && compdata[exes[i]][enviros[j]]
? compdata[exes[i]][enviros[j]][benchmarks[b]]
: "";
row.push(val === null || val === undefined ? "" : val);
}
}
rows.push(row);
}

var csv = rows.map(function(r) { return r.join(","); }).join("\n");
var blob = new Blob([csv], {type: "text/csv"});
var url = URL.createObjectURL(blob);
var a = document.createElement("a");
a.href = url;
a.download = "comparison.csv";
a.click();
URL.revokeObjectURL(url);
});
}

return {
Expand Down
Loading
Loading